# asr_malayalam / eduport_tts_mal.py
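"""
Training script for a Malayalam speech-to-text model.

A pretrained Wav2Vec2 encoder (facebook/wav2vec2-base-960h) produces frame-level audio
features, and a GPT-2 decoder with cross-attention generates the transcript tokens.
Training uses mixed precision (autocast + GradScaler), gradient accumulation, cosine
learning-rate annealing, and WER-based validation; the best checkpoint is written to
best_model.safetensors. Audio is expected under ./wav with matching .txt transcripts
under ./transcription.
"""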
import os
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
from transformers import Wav2Vec2Processor, Wav2Vec2Model
import torchaudio
from torch.optim.lr_scheduler import CosineAnnealingLR
from sklearn.model_selection import train_test_split
from torchaudio.transforms import Resample
from torch.amp import GradScaler, autocast
from tqdm import tqdm
from jiwer import wer
from safetensors.torch import save_model
# Compute the maximum audio length (in samples) across the dataset
def compute_max_audio_length(audio_files, target_sampling_rate):
    """Return the longest waveform length, in samples, after resampling to the target rate."""
    max_length = 0
    for audio_path in audio_files:
        waveform, sample_rate = torchaudio.load(audio_path)
        if sample_rate != target_sampling_rate:
            # Resample using the file's actual sample rate as the source frequency
            waveform = Resample(orig_freq=sample_rate, new_freq=target_sampling_rate)(waveform)
        max_length = max(max_length, waveform.size(1))  # Max length along the time dimension
    return max_length
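# Note: this scans every audio file once at startup to find the global maximum length;
# for a large corpus the result could be cached (or a fixed cap used) to avoid the extra pass.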
class SpeechDataset(Dataset):
def __init__(self, audio_files, transcript_files, tokenizer, processor, max_length=512, target_sampling_rate=16000, max_audio_length=None):
self.audio_files = audio_files
self.transcript_files = transcript_files
self.tokenizer = tokenizer
self.processor = processor
self.max_length = max_length
self.target_sampling_rate = target_sampling_rate
self.max_audio_length = max_audio_length # Max length of audio
        # Resamplers are built per file in __getitem__ so each file's true sample rate is used
def __len__(self):
return len(self.audio_files)
def __getitem__(self, idx):
audio_path = self.audio_files[idx]
transcript_path = self.transcript_files[idx]
# Load and process the audio
waveform, sample_rate = torchaudio.load(audio_path)
        # Resample to the target rate using the file's actual sample rate as the source frequency
        if sample_rate != self.target_sampling_rate:
            waveform = Resample(orig_freq=sample_rate, new_freq=self.target_sampling_rate)(waveform)
        # Convert to mono and run the Wav2Vec2 feature extractor; input_values has shape [1, time]
        waveform = waveform.mean(dim=0)
        input_values = self.processor(waveform, sampling_rate=self.target_sampling_rate, return_tensors="pt").input_values
        # Pad or truncate the audio to the fixed maximum length (the longest audio in the dataset)
        if input_values.size(1) < self.max_audio_length:
            padding_length = self.max_audio_length - input_values.size(1)
            padding = torch.zeros(input_values.size(0), padding_length, dtype=input_values.dtype)
            input_values = torch.cat([input_values, padding], dim=1)
        else:
            input_values = input_values[:, :self.max_audio_length]  # Truncate to max_audio_length
        # Load and process the transcript (UTF-8 for Malayalam text)
        with open(transcript_path, 'r', encoding='utf-8') as file:
            transcript = file.read().strip()
        # Encode the transcript with the GPT-2 tokenizer; padding happens later in collate_fn
        input_ids = self.tokenizer.encode(transcript, truncation=True, max_length=self.max_length, return_tensors="pt").squeeze(0)
return input_values, input_ids
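# Each dataset item is a pair: input_values of shape [1, max_audio_length] (mono audio,
# zero-padded/truncated to the dataset-wide maximum) and input_ids of shape [num_tokens].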
def collate_fn(batch):
audio_inputs, text_inputs = zip(*batch)
    # Ensure each audio input is 2D: [channels, time]
    audio_inputs = [audio.squeeze(0) if audio.dim() == 3 else audio for audio in audio_inputs]
# Pad audio inputs to the maximum length in the batch
max_audio_len = max([audio.size(1) for audio in audio_inputs])
audio_inputs_padded = []
for audio in audio_inputs:
if audio.size(1) < max_audio_len:
padding = torch.zeros(audio.size(0), max_audio_len - audio.size(1), device=audio.device)
padded_audio = torch.cat([audio, padding], dim=1)
audio_inputs_padded.append(padded_audio)
else:
audio_inputs_padded.append(audio[:, :max_audio_len])
# Stack audio inputs
audio_inputs_padded = torch.stack(audio_inputs_padded)
    # Pad text inputs with the tokenizer's pad token (EOS, assigned after the tokenizer is created)
    pad_id = tokenizer.pad_token_id
    max_text_len = max(text.size(0) for text in text_inputs)
    text_inputs_padded = torch.stack([
        torch.cat([text, torch.full((max_text_len - text.size(0),), pad_id, dtype=text.dtype)], dim=0)
        if text.size(0) < max_text_len
        else text[:max_text_len]
        for text in text_inputs
    ])
return audio_inputs_padded, text_inputs_padded
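# collate_fn therefore yields batches of shape [batch, 1, time] for audio and
# [batch, max_tokens_in_batch] for text; with batch_size=1 below, the in-batch audio
# padding is effectively a no-op since every item already has the same fixed length.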
# Tokenizer and processor
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')
tokenizer.pad_token = tokenizer.eos_token
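# Note: GPT-2's byte-level BPE tokenizer can encode any UTF-8 text, including Malayalam,
# but it was not trained on Malayalam, so transcripts fall back to long byte-level token
# sequences; a tokenizer trained on Malayalam text would likely be a better fit.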
# Data preparation
wav_folder = './wav'
transcript_folder = './transcription'
# Pair each .wav file with its matching .txt transcript (sorted for a stable, reproducible ordering)
wav_names = sorted(f for f in os.listdir(wav_folder) if f.endswith('.wav'))
audio_files = [os.path.join(wav_folder, f) for f in wav_names]
transcript_files = [os.path.join(transcript_folder, f.replace('.wav', '.txt')) for f in wav_names]
# Now compute the max audio length (in samples at 16 kHz)
max_audio_length = compute_max_audio_length(audio_files, target_sampling_rate=16000)
print(f'Max audio length (samples): {max_audio_length}')
# Split the dataset into train and validation sets
train_audios, val_audios, train_transcripts, val_transcripts = train_test_split(audio_files, transcript_files, test_size=0.05, random_state=42)
# Define your dataset and dataloaders
train_dataset = SpeechDataset(train_audios, train_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
val_dataset = SpeechDataset(val_audios, val_transcripts, tokenizer, processor, max_audio_length=max_audio_length)
# Update your DataLoader to use the custom collate_fn
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, collate_fn=collate_fn)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
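# With batch_size=1 here and accumulation_steps=16 in train_model below, the optimizer sees an
# effective batch size of 16 while keeping per-step GPU memory low.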
# Model Architecture: pretrained Wav2Vec2 encoder + randomly initialized GPT-2 decoder
# (these instances are reused by train_model below)
encoder = Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h')
decoder_config = GPT2Config(vocab_size=len(tokenizer))
decoder_config.add_cross_attention = True  # let the decoder cross-attend to the audio features
decoder = GPT2LMHeadModel(config=decoder_config)
# Encoder-decoder wrapper with gradient checkpointing; FP16 is handled by autocast in the training loop
class SpeechRecognitionModel(torch.nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.encoder.gradient_checkpointing_enable()
self.decoder.gradient_checkpointing_enable()
def forward(self, audio_input, text_input):
        # Wav2Vec2 expects input_values of shape [batch_size, time]; drop singleton channel dims if present
        while audio_input.dim() > 2 and audio_input.size(1) == 1:
            audio_input = audio_input.squeeze(1)
# Extract encoder hidden states
encoder_output = self.encoder(audio_input).last_hidden_state
# Create an attention mask for the encoder output
encoder_attention_mask = torch.ones(
encoder_output.shape[:2],
dtype=torch.long,
device=encoder_output.device
)
# Forward pass through the decoder with cross-attention
outputs = self.decoder(
input_ids=text_input,
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_attention_mask,
use_cache=False
)
return outputs
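# The decoder cross-attends to Wav2Vec2's frame-level hidden states; at 16 kHz the encoder
# downsamples the waveform by roughly 320x, i.e. about 50 feature frames per second of audio.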
def compute_wer(model, val_loader, device, tokenizer):
model.eval()
total_wer = 0
with torch.no_grad():
        for audio_input, text_input in val_loader:
            audio_input = audio_input.to(device)
            if audio_input.dim() > 2:
                audio_input = audio_input.squeeze(1)  # [batch, time] for the Wav2Vec2 encoder
            text_input = text_input.to(device)
            # Generate predictions; note that whether generate() forwards encoder_hidden_states
            # to every decoding step depends on the installed transformers version, so the
            # decoded outputs should be sanity-checked
            outputs = model.decoder.generate(
                encoder_hidden_states=model.encoder(audio_input).last_hidden_state,
                max_length=text_input.size(1)
            )
# Convert predictions and ground truth to text
predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
ground_truth = tokenizer.batch_decode(text_input, skip_special_tokens=True)
# Compute WER
batch_wer = wer(ground_truth, predictions)
total_wer += batch_wer
return total_wer / len(val_loader)
# Training Loop with Improved Mixed Precision
def train_model(num_epochs=10, accumulation_steps=16):
    # Wrap the encoder and decoder built above; weights stay in float32 and
    # autocast handles reduced precision during the forward/backward passes
    model = SpeechRecognitionModel(encoder, decoder)
# Move to GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# Optimizer and learning rate scheduler
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=1e-6)
# Gradient scaler for mixed precision training
scaler = GradScaler()
    best_val_loss = float('inf')  # Track the best validation loss for checkpointing
    # Training loop
for epoch in tqdm(range(num_epochs), desc="Epochs", position=0):
model.train()
train_loss = 0
train_progress = tqdm(train_loader, desc=f"Epoch {epoch+1}", position=1, leave=False)
for batch_idx, (audio_input, text_input) in enumerate(train_progress):
            # Move tensors to the device; the model's forward handles any singleton channel dimension
            audio_input = audio_input.to(device)
            text_input = text_input.to(device)
# Use autocast for mixed precision training
with autocast(dtype=torch.float16, device_type='cuda'):
# Forward pass
output = model(audio_input, text_input)
                # Next-token prediction loss: logits at position t predict token t+1,
                # so shift logits and targets before computing cross-entropy
                logits = output.logits[:, :-1, :].contiguous()
                targets = text_input[:, 1:].contiguous()
                loss = torch.nn.CrossEntropyLoss()(logits.view(-1, logits.size(-1)), targets.view(-1))
                loss = loss / accumulation_steps
# Scaled loss for mixed precision
scaler.scale(loss).backward()
if (batch_idx + 1) % accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
train_progress.set_postfix({'loss': loss.item()})
train_loss += loss.item()
        # Step once more if the last accumulation window was left incomplete
        if (batch_idx + 1) % accumulation_steps != 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
# Validation step
model.eval()
val_loss = 0
        # Named val_wer so jiwer's wer() helper is not shadowed
        val_wer = compute_wer(model, val_loader, device, tokenizer)
val_progress = tqdm(val_loader, desc=f"Validation {epoch+1}", position=1, leave=False)
        # Iterate the tqdm wrapper so the validation progress bar actually advances
        with torch.no_grad(), autocast(device_type='cuda', dtype=torch.float16):
            for audio_input, text_input in val_progress:
audio_input = audio_input.to(device)
text_input = text_input.to(device)
output = model(audio_input, text_input)
                # Same shifted next-token objective as in training
                logits = output.logits[:, :-1, :]
                loss = torch.nn.CrossEntropyLoss()(logits.reshape(-1, logits.size(-1)), text_input[:, 1:].reshape(-1))
val_progress.set_postfix({'loss': loss.item()})
val_loss += loss.item()
        # Update the cosine annealing scheduler (stepped once per epoch; it takes no metric)
        scheduler.step()
        print(f'Epoch {epoch + 1}: Train Loss: {train_loss / len(train_loader)}, Val Loss: {val_loss / len(val_loader)}, WER: {val_wer}')
if val_loss < best_val_loss:
best_val_loss = val_loss
            # Save the model weights to .safetensors; save_model handles GPT-2's tied
            # embedding/lm_head weights, which plain save_file rejects as shared tensors
            save_model(model, 'best_model.safetensors')
            # Save optimizer state and lightweight metadata separately; the live module is not
            # stored here (doing so would also move the model off the GPU mid-training)
            torch.save({
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch,
                'loss': val_loss,
                'model_class': model.__class__.__name__,  # Save the class name
            }, 'metadata.pth')
# Run the training
train_model()
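# Example (not run here): restoring the best checkpoint saved above, assuming the same
# encoder/decoder configuration that was used for training.
#
#   from safetensors.torch import load_model
#   restored = SpeechRecognitionModel(
#       Wav2Vec2Model.from_pretrained('facebook/wav2vec2-base-960h'),
#       GPT2LMHeadModel(config=decoder_config),
#   )
#   load_model(restored, 'best_model.safetensors')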