Commit 15af85e · Update README.md
Parent: a189878

README.md CHANGED
@@ -31,13 +31,13 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
 To transcribe audio files the model can be used as a standalone acoustic model as follows:
 
 ```python
-from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
+from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 from datasets import load_dataset
 import soundfile as sf
 import torch
 
-# load model and tokenizer
-tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-100h")
+# load model and processor
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-100h")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-100h")
 
 # define function to read in sound file
@@ -51,14 +51,14 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 ds = ds.map(map_to_array)
 
 # tokenize
-input_values = tokenizer(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
+input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
 
 # retrieve logits
 logits = model(input_values).logits
 
 # take argmax and decode
 predicted_ids = torch.argmax(logits, dim=-1)
-transcription = tokenizer.batch_decode(predicted_ids)
+transcription = processor.batch_decode(predicted_ids)
 ```
 
 ## Evaluation
@@ -67,7 +67,7 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 
 ```python
 from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import soundfile as sf
 import torch
 from jiwer import wer
@@ -76,7 +76,7 @@ from jiwer import wer
 librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-100h").to("cuda")
-tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-100h")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-100h")
 
 def map_to_array(batch):
     speech, _ = sf.read(batch["file"])
@@ -86,12 +86,12 @@ def map_to_array(batch):
 librispeech_eval = librispeech_eval.map(map_to_array)
 
 def map_to_pred(batch):
-    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
+    input_values = processor(batch["speech"], return_tensors="pt", padding="longest").input_values
     with torch.no_grad():
         logits = model(input_values.to("cuda")).logits
 
     predicted_ids = torch.argmax(logits, dim=-1)
-    transcription = tokenizer.batch_decode(predicted_ids)
+    transcription = processor.batch_decode(predicted_ids)
     batch["transcription"] = transcription
     return batch
 
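The hunks above elide README lines 44-50, where the snippet loads a dataset and defines `map_to_array` before the `ds = ds.map(map_to_array)` call at line 51. A minimal sketch of that elided portion so the transcription example runs end to end; the dummy LibriSpeech dataset name is an assumption, not something shown in this diff:

```python
from datasets import load_dataset
import soundfile as sf

# assumption: the dummy LibriSpeech split these model cards typically use
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")

# read each sound file into a float array the processor can consume
def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

ds = ds.map(map_to_array)
```

Note that despite the `# Batch size 1` comment on line 54, `ds["speech"][:2]` selects two utterances, so `padding="longest"` pads a batch of two.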
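The evaluation hunks stop at README line 97, just before the code that would apply `map_to_pred` and use the `wer` import. Presumably the section continues roughly as follows; this is a hedged sketch of that unshown tail, not part of the commit:

```python
# sketch of the unshown continuation: run prediction over the eval split
# and score the transcriptions with jiwer's word error rate
result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])
print("WER:", wer(result["text"], result["transcription"]))
```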
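For context on the swap this commit makes: in transformers, `Wav2Vec2Processor` bundles a feature extractor (which turns raw waveforms into `input_values`) with a CTC tokenizer (which turns predicted ids back into text), so a single object now covers both ends of the pipeline. A quick way to confirm the composition, assuming the same checkpoint:

```python
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-100h")

# the processor wraps two components: one for audio in, one for text out
print(type(processor.feature_extractor).__name__)  # Wav2Vec2FeatureExtractor
print(type(processor.tokenizer).__name__)          # Wav2Vec2CTCTokenizer
```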