"""Train a speech recognition model: a Wav2Vec2 audio encoder feeding a GPT-2
decoder through cross-attention, on paired .wav / .txt files."""

import os

import torch
import torchaudio
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.amp import GradScaler, autocast

from transformers import GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
from transformers import Wav2Vec2Processor, Wav2Vec2Model
from sklearn.model_selection import train_test_split
from jiwer import wer
from tqdm import tqdm
from safetensors.torch import save_model


def compute_max_audio_length(audio_files, target_sampling_rate):
    """Return the number of samples in the longest clip, after resampling."""
    max_length = 0
    for audio_path in audio_files:
        waveform, sample_rate = torchaudio.load(audio_path)
        if sample_rate != target_sampling_rate:
            waveform = torchaudio.functional.resample(waveform, sample_rate, target_sampling_rate)
        max_length = max(max_length, waveform.size(1))
    return max_length
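
# The length above is in samples at target_sampling_rate; SpeechDataset pads or
# crops every clip to this fixed length so all audio tensors share one shape.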


class SpeechDataset(Dataset):
    """Pairs each .wav file with its transcript and yields (input_values, input_ids)."""

    def __init__(self, audio_files, transcript_files, tokenizer, processor,
                 max_length=512, target_sampling_rate=16000, max_audio_length=None):
        self.audio_files = audio_files
        self.transcript_files = transcript_files
        self.tokenizer = tokenizer
        self.processor = processor
        self.max_length = max_length
        self.target_sampling_rate = target_sampling_rate
        self.max_audio_length = max_audio_length

    def __len__(self):
        return len(self.audio_files)

    def __getitem__(self, idx):
        audio_path = self.audio_files[idx]
        transcript_path = self.transcript_files[idx]

        waveform, sample_rate = torchaudio.load(audio_path)
        if waveform.size(0) > 1:
            # Down-mix multi-channel audio to mono.
            waveform = waveform.mean(dim=0, keepdim=True)
        if sample_rate != self.target_sampling_rate:
            waveform = torchaudio.functional.resample(waveform, sample_rate, self.target_sampling_rate)

        # Feature-extract the mono waveform; keep the (1, num_samples) shape so the
        # padding below can work along dim 1.
        input_values = self.processor(
            waveform.squeeze(0).numpy(),
            sampling_rate=self.target_sampling_rate,
            return_tensors="pt"
        ).input_values

        # Zero-pad (or crop) to the precomputed maximum audio length.
        if input_values.size(1) < self.max_audio_length:
            padding_length = self.max_audio_length - input_values.size(1)
            padding = torch.zeros(input_values.size(0), padding_length, dtype=input_values.dtype)
            input_values = torch.cat([input_values, padding], dim=1)
        else:
            input_values = input_values[:, :self.max_audio_length]

        with open(transcript_path, 'r') as file:
            transcript = file.read().strip()

        input_ids = self.tokenizer.encode(
            transcript, truncation=True, max_length=self.max_length, return_tensors="pt"
        ).squeeze(0)

        return input_values, input_ids
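
# Each item: input_values is a FloatTensor of shape (1, max_audio_length) of
# zero-padded raw audio; input_ids is a LongTensor of GPT-2 token ids.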


def collate_fn(batch):
    """Pad a list of (audio, text) pairs to common lengths and stack them."""
    audio_inputs, text_inputs = zip(*batch)

    # Drop a stray leading batch dimension if an item arrives as (1, 1, T).
    audio_inputs = [audio.squeeze(0) if audio.dim() == 3 else audio for audio in audio_inputs]

    # Pad audio along the time axis to the longest clip in the batch.
    max_audio_len = max(audio.size(1) for audio in audio_inputs)
    audio_inputs_padded = []
    for audio in audio_inputs:
        if audio.size(1) < max_audio_len:
            padding = torch.zeros(audio.size(0), max_audio_len - audio.size(1), device=audio.device)
            audio_inputs_padded.append(torch.cat([audio, padding], dim=1))
        else:
            audio_inputs_padded.append(audio[:, :max_audio_len])
    audio_inputs_padded = torch.stack(audio_inputs_padded)

    # Pad token ids with the tokenizer's pad token (EOS for GPT-2), not a bare 0.
    max_text_len = max(text.size(0) for text in text_inputs)
    pad_id = tokenizer.pad_token_id
    text_inputs_padded = torch.stack([
        torch.cat([text, torch.full((max_text_len - text.size(0),), pad_id, dtype=text.dtype)], dim=0)
        if text.size(0) < max_text_len
        else text[:max_text_len]
        for text in text_inputs
    ])

    return audio_inputs_padded, text_inputs_padded
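
# Each batch: audio_inputs_padded is (B, 1, max_audio_len) float32,
# text_inputs_padded is (B, max_text_len) int64.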


tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')

# GPT-2 has no dedicated pad token, so reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

wav_folder = './wav'
transcript_folder = './transcription'

# Pair each .wav file with a .txt transcript of the same basename.
wav_names = sorted(f for f in os.listdir(wav_folder) if f.endswith('.wav'))
audio_files = [os.path.join(wav_folder, f) for f in wav_names]
transcript_files = [os.path.join(transcript_folder, f.replace('.wav', '.txt')) for f in wav_names]

max_audio_length = compute_max_audio_length(audio_files, target_sampling_rate=16000)
print(f'Longest clip: {max_audio_length} samples')

train_audios, val_audios, train_transcripts, val_transcripts = train_test_split(
    audio_files, transcript_files, test_size=0.05, random_state=42)

train_dataset = SpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
val_dataset = SpeechDataset(val_audios, val_transcripts, tokenizer, processor, max_audio_length=max_audio_length)

train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, collate_fn=collate_fn)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
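
# batch_size=1 keeps memory manageable for full-length padded clips; gradient
# accumulation in train_model() (accumulation_steps=16) gives an effective batch of 16.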


# Standalone encoder/decoder pair (train_model() below builds its own copy).
encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
decoder_config = GPT2Config(vocab_size=len(tokenizer))
decoder_config.add_cross_attention = True
decoder = GPT2LMHeadModel(config=decoder_config)


class SpeechRecognitionModel(torch.nn.Module):
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

        # Trade compute for memory on both halves of the model.
        self.encoder.gradient_checkpointing_enable()
        self.decoder.gradient_checkpointing_enable()

    def forward(self, audio_input, text_input):
        # Accept (B, 1, T) batches straight from the DataLoader as well as (B, T).
        if audio_input.dim() == 3:
            audio_input = audio_input.squeeze(1)

        encoder_output = self.encoder(audio_input).last_hidden_state

        # Every encoder frame is attended to (clips were padded to equal length).
        encoder_attention_mask = torch.ones(
            encoder_output.shape[:2],
            dtype=torch.long,
            device=encoder_output.device
        )

        outputs = self.decoder(
            input_ids=text_input,
            encoder_hidden_states=encoder_output,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=False
        )

        return outputs
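
# Note: wav2vec2-base and GPT-2 both use a hidden size of 768, so the decoder's
# cross-attention can consume the encoder states directly without a projection layer.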


def compute_wer(model, val_loader, device, tokenizer):
    """Greedy-decode the validation set and average the per-batch word error rate."""
    model.eval()
    total_wer = 0
    with torch.no_grad():
        for audio_input, text_input in val_loader:
            audio_input = audio_input.to(device)
            text_input = text_input.to(device)
            if audio_input.dim() == 3:
                audio_input = audio_input.squeeze(1)

            outputs = model.decoder.generate(
                encoder_hidden_states=model.encoder(audio_input).last_hidden_state,
                max_length=text_input.size(1),
                pad_token_id=tokenizer.pad_token_id
            )

            predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
            ground_truth = tokenizer.batch_decode(text_input, skip_special_tokens=True)

            total_wer += wer(ground_truth, predictions)

    return total_wer / len(val_loader)
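
# Decoding starts from GPT-2's BOS token (no prompt is given); the audio enters only
# through encoder_hidden_states, which generate() is assumed to forward to the
# decoder's cross-attention (this depends on the transformers version).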


def train_model(num_epochs=10, accumulation_steps=16):
    encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')

    decoder_config = GPT2Config(
        vocab_size=len(tokenizer),
        add_cross_attention=True
    )
    decoder = GPT2LMHeadModel(config=decoder_config)

    model = SpeechRecognitionModel(encoder, decoder)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=1e-6)

    # Mixed-precision utilities.
    scaler = GradScaler()
    best_val_loss = float('inf')

    for epoch in tqdm(range(num_epochs), desc="Epochs", position=0):
        model.train()
        train_loss = 0
        optimizer.zero_grad()

        train_progress = tqdm(train_loader, desc=f"Epoch {epoch+1}", position=1, leave=False)

        for batch_idx, (audio_input, text_input) in enumerate(train_progress):
            audio_input = audio_input.to(device)
            text_input = text_input.to(device)

            with autocast(device_type='cuda', dtype=torch.float16):
                output = model(audio_input, text_input)

                # Next-token prediction: shift logits and targets by one position.
                logits = output.logits[:, :-1, :]
                targets = text_input[:, 1:]
                loss = torch.nn.CrossEntropyLoss()(
                    logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
                loss = loss / accumulation_steps

            scaler.scale(loss).backward()
            if (batch_idx + 1) % accumulation_steps == 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            train_progress.set_postfix({'loss': loss.item() * accumulation_steps})
            train_loss += loss.item() * accumulation_steps

        # Flush any gradients left over from an incomplete accumulation window.
        if (batch_idx + 1) % accumulation_steps != 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

        # Validation: cross-entropy loss plus word error rate on greedy decodes.
        model.eval()
        val_loss = 0
        val_wer = compute_wer(model, val_loader, device, tokenizer)
        val_progress = tqdm(val_loader, desc=f"Validation {epoch+1}", position=1, leave=False)

        with torch.no_grad(), autocast(device_type='cuda', dtype=torch.float16):
            for audio_input, text_input in val_progress:
                audio_input = audio_input.to(device)
                text_input = text_input.to(device)
                output = model(audio_input, text_input)
                logits = output.logits[:, :-1, :]
                targets = text_input[:, 1:]
                loss = torch.nn.CrossEntropyLoss()(
                    logits.reshape(-1, logits.size(-1)), targets.reshape(-1))

                val_progress.set_postfix({'loss': loss.item()})
                val_loss += loss.item()

        # CosineAnnealingLR is stepped once per epoch; it does not take a metric.
        scheduler.step()

        print(f'Epoch {epoch+1}: Train Loss: {train_loss / len(train_loader)}, '
              f'Val Loss: {val_loss / len(val_loader)}, WER: {val_wer}')

        # Keep the weights from the best validation epoch.
        if val_loss < best_val_loss:
            best_val_loss = val_loss

            # save_model handles GPT-2's tied embedding / lm_head weights, which
            # plain save_file rejects as shared tensors.
            save_model(model, 'best_model.safetensors')

            torch.save({
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch,
                'loss': val_loss,
                'model_class': model.__class__.__name__,
                # Record the architecture as text; calling model.to('cpu') here
                # would pull the live model off the GPU mid-training.
                'model_architecture': str(model),
            }, 'metadata.pth')


train_model()
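
# To restore the best checkpoint into a freshly built model later (illustrative):
#   from safetensors.torch import load_model
#   load_model(model, 'best_model.safetensors')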