Upload gpu_processing.py with huggingface_hub
gpu_processing.py +107 -0
gpu_processing.py
ADDED
@@ -0,0 +1,107 @@
import os
import gc  # for garbage collection between batches
import pandas as pd
import torch
from torch.amp import autocast
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor

# Function to load the custom dataset from paired audio and transcript files
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Assuming each file in 'wav' has a same-named .txt file in 'transcription'
    for wav_file in sorted(os.listdir(wav_dir)):  # sorted for a deterministic order
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Append to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)

    # Create a pandas DataFrame and convert it to a Hugging Face dataset
    df = pd.DataFrame(data)
    dataset = Dataset.from_pandas(df)

    # Define the audio feature; .wav files are decoded and resampled on access
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))  # adjust the sampling rate if needed

    return dataset

# Load the custom dataset
custom_train_dataset = load_custom_dataset("./")

# Load the Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)

# Keep only the audio and transcript columns, rename "sentence" to "text" so the
# test split matches the train schema (prepare_dataset below reads batch["text"]),
# and resample to 16 kHz to match the Whisper feature extractor
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
common_voice_test = common_voice_test.rename_column("sentence", "text")
common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000))

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})
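
# (Optional sanity check, not part of the uploaded script: confirm both splits
# now share one schema before the expensive mapping step runs.)
assert set(dataset_dict["train"].column_names) == set(dataset_dict["test"].column_names)
print(dataset_dict)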

# Load the Whisper feature extractor, tokenizer, and processor
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

# Check whether a GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
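
# (Optional sketch, not part of the uploaded script: a quick round-trip through
# the Malayalam tokenizer on one real transcript, to eyeball the encoding
# before mapping the whole corpus.)
sample_text = custom_train_dataset[0]["text"]
sample_ids = tokenizer(sample_text).input_ids
print("decoded:", tokenizer.decode(sample_ids, skip_special_tokens=True))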

# Function to prepare a batch of examples (with GPU support where available)
def prepare_dataset(batch):
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        # Move the audio to the selected device as a tensor
        audio_tensor = torch.tensor(audio).to(device)

        # Run under autocast for mixed precision; the feature extractor itself
        # expects a NumPy array, so the tensor is brought back to the CPU first
        with autocast(device):
            audio_tensor_cpu = audio_tensor.cpu().numpy()

        feature = feature_extractor(audio_tensor_cpu, sampling_rate=sr).input_features[0]
        features.append(feature)

        # Free the memory held by this example
        del audio_tensor
        gc.collect()

    batch["input_features"] = features
    batch["labels"] = [tokenizer(text).input_ids for text in batch["text"]]
    return batch
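
# (Optional sketch, not part of the uploaded script: WhisperFeatureExtractor
# works on CPU NumPy arrays, so the same preprocessing can be written without
# the GPU round-trip. A minimal CPU-only variant, reusing the
# feature_extractor and tokenizer defined above:)
def prepare_dataset_cpu(batch):
    batch["input_features"] = [
        feature_extractor(item["array"], sampling_rate=item["sampling_rate"]).input_features[0]
        for item in batch["audio"]
    ]
    batch["labels"] = [tokenizer(text).input_ids for text in batch["text"]]
    return batch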

# Apply the preparation function to both splits with Dataset.map
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batched=True,      # enable batched processing
    batch_size=1024,   # examples per batch; lower this if memory runs out
)
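
# (Optional sketch, not part of the uploaded script: Dataset.map also accepts
# num_proc for parallel worker processes. That pairs with the CPU-only variant
# sketched above, since CUDA tensors and forked workers do not mix well; the
# batch size and worker count here are arbitrary examples.)
# dataset_dict = dataset_dict.map(
#     prepare_dataset_cpu,
#     remove_columns=dataset_dict.column_names["train"],
#     batched=True,
#     batch_size=256,
#     num_proc=4,
# )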

# Save the processed dataset to disk
dataset_dict.save_to_disk("processed_dataset")
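
# (Optional, not part of the uploaded script: the saved dataset can be restored
# later with datasets.load_from_disk, assuming the same path.)
# from datasets import load_from_disk
# dataset_dict = load_from_disk("processed_dataset")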

# Check a sample from the train set
print(dataset_dict['train'][0])
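
# (Optional, not part of the uploaded script: for openai/whisper-small the
# log-Mel features should come out as an (80, 3000) array per example, which
# is quicker to verify than the full sample dump above.)
import numpy as np
print(np.array(dataset_dict["train"][0]["input_features"]).shape)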