Spaces:
Running
Running
Update tts_utils.py
Browse files — tts_utils.py (+13, −2)
tts_utils.py
CHANGED
@@ -3,14 +3,25 @@ import torch
|
|
3 |
from parler_tts import ParlerTTSForConditionalGeneration
|
4 |
from transformers import AutoTokenizer
|
5 |
|
|
|
6 |
def load_model():
|
7 |
model = ParlerTTSForConditionalGeneration.from_pretrained(
|
8 |
"ai4bharat/indic-parler-tts",
|
9 |
-        torch_dtype=torch.
|
10 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-parler-tts")
|
12 |
description_tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-parler-tts")
|
13 |
-
|
|
|
|
|
14 |
|
15 |
def generate_speech(text, voice_prompt, model, tokenizer, description_tokenizer):
|
16 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
3 |
from parler_tts import ParlerTTSForConditionalGeneration
|
4 |
from transformers import AutoTokenizer
|
5 |
|
6 |
+ # Updated load_model function in tts_utils.py
|
7 |
def load_model():
|
8 |
model = ParlerTTSForConditionalGeneration.from_pretrained(
|
9 |
"ai4bharat/indic-parler-tts",
|
10 |
+        torch_dtype=torch.float32  # Force CPU-compatible dtype
|
11 |
)
|
12 |
+
|
13 |
+    # Apply dynamic quantization to Linear layers
|
14 |
+    quantized_model = torch.ao.quantization.quantize_dynamic(
|
15 |
+        model,
|
16 |
+        {torch.nn.Linear},  # Target layer type
|
17 |
+        dtype=torch.qint8
|
18 |
+    )
|
19 |
+
|
20 |
tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-parler-tts")
|
21 |
description_tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-parler-tts")
|
22 |
+
|
23 |
+    return quantized_model, tokenizer, description_tokenizer
|
24 |
+
|
25 |
|
26 |
def generate_speech(text, voice_prompt, model, tokenizer, description_tokenizer):
|
27 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|