import os
import json
import librosa
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer

class MalayalamDatasetTokenizer:
    def __init__(self, transcription_dir, wav_dir, vocab_size=16000):
        """
        Initialize tokenizer with directories for transcriptions and audio files
        
        :param transcription_dir: Path to folder containing text transcriptions
        :param wav_dir: Path to folder containing WAV audio files
        :param vocab_size: Size of the vocabulary for text tokenization
        """
        self.transcription_dir = transcription_dir
        self.wav_dir = wav_dir
        
        # Initialize text tokenizer
        self.text_tokenizer = self._create_text_tokenizer(vocab_size)
        
        # Audio tokenization parameters
        self.audio_tokenizer = {
            "sample_rate": 16000,  # Standard for speech models
            "n_mfcc": 13,  # Number of MFCCs to extract
            "n_fft": 2048,  # FFT window size
            "hop_length": 512  # Hop length between frames
        }
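        # Note: with sample_rate=16000 and hop_length=512, consecutive MFCC
        # frames are 512 / 16000 = 0.032 s apart, i.e. about 31 frames per
        # second of audio.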
    
    def _create_text_tokenizer(self, vocab_size):
        """
        Create a WordPiece tokenizer for Malayalam text
        """
        tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        tokenizer.pre_tokenizer = Whitespace()
        
        special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"]
        
        # Keep the trainer so train_text_tokenizer() can apply the requested
        # vocabulary size and special tokens
        self.text_trainer = WordPieceTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens
        )
        
        return tokenizer
    
    def _get_matched_files(self):
        """
        Find matching transcription and audio files
        
        :return: List of tuples (transcription_path, audio_path)
        """
        matched_files = []
        
        # Get all transcription files
        for trans_file in os.listdir(self.transcription_dir):
            # Remove extension to match with audio file
            base_name = os.path.splitext(trans_file)[0]
            
            # Check for corresponding WAV file
            wav_path = os.path.join(self.wav_dir, base_name + '.wav')
            trans_path = os.path.join(self.transcription_dir, trans_file)
            
            if os.path.exists(wav_path):
                matched_files.append((trans_path, wav_path))
        
        return matched_files
    
    def process_dataset(self):
        """
        Process entire dataset, tokenizing text and extracting audio features
        
        :return: Processed dataset with tokenized text and audio features
        """
        dataset = []
        matched_files = self._get_matched_files()
        
        for trans_path, wav_path in matched_files:
            # Read transcription
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()
            
            # Tokenize text
            text_tokens = self.text_tokenizer.encode(transcription).ids
            
            # Extract audio features
            audio_features = self._extract_audio_features(wav_path)
            
            dataset.append({
                'transcription': transcription,
                'text_tokens': text_tokens,
                'audio_features': audio_features,
                'audio_path': wav_path,
                'transcription_path': trans_path
            })
        
        return dataset
    
    def _extract_audio_features(self, audio_path):
        """
        Extract MFCC features from audio file
        
        :param audio_path: Path to WAV file
        :return: Extracted audio features
        """
        # Load audio file
        audio, sr = librosa.load(
            audio_path, 
            sr=self.audio_tokenizer['sample_rate']
        )
        
        # Extract MFCCs
        mfccs = librosa.feature.mfcc(
            y=audio, 
            sr=sr,
            n_mfcc=self.audio_tokenizer['n_mfcc'],
            n_fft=self.audio_tokenizer['n_fft'],
            hop_length=self.audio_tokenizer['hop_length']
        )
        
        # Transpose to (frames, n_mfcc) and convert to nested lists so the
        # features are JSON-serializable
        return mfccs.T.tolist()
    
    def train_text_tokenizer(self):
        """
        Train text tokenizer on all transcription files
        """
        # Collect all transcriptions
        transcriptions = []
        for trans_path, _ in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcriptions.append(f.read().strip())
        
        # Train tokenizer with the stored WordPiece trainer so the configured
        # vocab size and special tokens are used
        self.text_tokenizer.train_from_iterator(transcriptions, trainer=self.text_trainer)
    
    def save_dataset(self, output_path):
        """
        Save processed dataset to JSON
        
        :param output_path: Path to save processed dataset
        """
        dataset = self.process_dataset()
        
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(dataset, f, ensure_ascii=False, indent=2)
        
        print(f"Saved dataset to {output_path}")
    
    def save_tokenizer(self, output_dir):
        """
        Save tokenizer configurations
        
        :param output_dir: Directory to save tokenizer files
        """
        os.makedirs(output_dir, exist_ok=True)
        
        # Save text tokenizer vocabulary
        with open(os.path.join(output_dir, 'text_tokenizer.json'), 'w', encoding='utf-8') as f:
            json.dump({
                'vocab': self.text_tokenizer.get_vocab(),
                'model_type': 'WordPiece'
            }, f, ensure_ascii=False, indent=2)
        
        # Save audio tokenizer configuration
        with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
            json.dump(self.audio_tokenizer, f, indent=2)
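        # Note: the trained tokenizer (model, pre-tokenizer and vocab) can also
        # be serialized in the tokenizers library's native format, e.g.:
        #   self.text_tokenizer.save(os.path.join(output_dir, 'text_tokenizer_full.json'))
        # (the 'text_tokenizer_full.json' filename is only illustrative)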

# Example usage
if __name__ == "__main__":
    # Initialize tokenizer
    tokenizer = MalayalamDatasetTokenizer(
        transcription_dir='transcription', 
        wav_dir='wav'
    )
    
    # Train text tokenizer
    tokenizer.train_text_tokenizer()
    
    # Process and save dataset
    # tokenizer.save_dataset('malayalam_dataset.json')
    
    # Save tokenizer configurations
    tokenizer.save_tokenizer('malayalam_tokenizer')
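
    # A minimal sketch of inspecting the trained tokenizer on one sample pair;
    # this assumes at least one matching transcription/WAV pair exists under
    # the 'transcription' and 'wav' directories given above.
    matched = tokenizer._get_matched_files()
    if matched:
        sample_trans, sample_wav = matched[0]
        with open(sample_trans, 'r', encoding='utf-8') as f:
            sample_text = f.read().strip()

        # Text side: encode() returns an Encoding with .tokens and .ids
        encoding = tokenizer.text_tokenizer.encode(sample_text)
        print("Sample tokens:", encoding.tokens[:10])
        print("Sample token ids:", encoding.ids[:10])

        # Audio side: each row of the returned list is one MFCC frame
        mfcc_frames = tokenizer._extract_audio_features(sample_wav)
        print("MFCC frames:", len(mfcc_frames),
              "| coefficients per frame:", len(mfcc_frames[0]) if mfcc_frames else 0)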