import torch
from torch.utils.data import IterableDataset
from torch.fft import fft
import torchaudio.transforms as T

class SplitDataset(IterableDataset):
    """Deterministically splits a streaming dataset into train/validation views.

    Items are partitioned in blocks of 100: the first int(train_ratio * 100)
    items of each block go to the train view, the remainder to validation.
    """

    def __init__(self, dataset, is_train=True, train_ratio=0.8):
        self.dataset = dataset
        self.is_train = is_train
        self.train_ratio = train_ratio

    def __iter__(self):
        count = 0
        for item in self.dataset:
            # Positions 0 .. int(train_ratio * 100) - 1 within each
            # 100-item block belong to the train split; the rest to validation.
            is_train_item = count < int(self.train_ratio * 100)
            if is_train_item == self.is_train:
                yield item
            count = (count + 1) % 100
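
# A minimal usage sketch (the `stream` source here is hypothetical): wrapping
# the same re-iterable stream twice yields disjoint views, since positions
# 0-79 of every 100-item block go to the train wrapper and positions 80-99
# to the validation wrapper:
#
#     train_ds = SplitDataset(stream, is_train=True, train_ratio=0.8)
#     val_ds = SplitDataset(stream, is_train=False, train_ratio=0.8)
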
class FFTDataset(IterableDataset):
    """Resamples each audio item and attaches its discrete Fourier transform."""

    def __init__(self, original_dataset, orig_sample_rate=12000, target_sample_rate=6000):
        self.dataset = original_dataset
        self.resampler = T.Resample(orig_freq=orig_sample_rate, new_freq=target_sample_rate)

    def __iter__(self):
        for item in self.dataset:
            # Assumes raw samples live under item['audio']['array'];
            # adjust the key path to match your actual data structure.
            audio_data = torch.tensor(item['audio']['array']).float()
            if len(audio_data) == 0:
                continue  # skip empty clips rather than crash the pipeline
            resampled_audio = self.resampler(audio_data)
            # torch.fft.fft returns a complex tensor of the same length.
            fft_data = fft(resampled_audio)
            item['audio']['fft'] = fft_data
            yield item
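
# A hedged end-to-end sketch of wiring the two wrappers together. The
# dataset name ("user/audio-dataset") and the item layout
# {'audio': {'array': ...}} are illustrative assumptions, not part of
# this file.
if __name__ == "__main__":
    from datasets import load_dataset

    stream = load_dataset("user/audio-dataset", split="train", streaming=True)
    train_ds = FFTDataset(SplitDataset(stream, is_train=True))
    val_ds = FFTDataset(SplitDataset(stream, is_train=False))

    sample = next(iter(train_ds))
    print(sample['audio']['fft'].shape, sample['audio']['fft'].dtype)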