retry
app.py
CHANGED
@@ -16,18 +16,27 @@ logging.basicConfig(
     format='%(asctime)s - %(levelname)s - %(message)s'
 )
 
+checkpoint_dir = "facebook/final_m2m100"
 # Initialize translation model
-checkpoint_dir = "bishaltwr/final_m2m100" # Change to Hugging Face model ID when deployed
 try:
+    logging.info(f"Attempting to load M2M100 from {checkpoint_dir}")
     tokenizer = M2M100Tokenizer.from_pretrained(checkpoint_dir)
+    logging.info("M2M100 tokenizer loaded successfully")
+
     model_m2m = M2M100ForConditionalGeneration.from_pretrained(checkpoint_dir)
+    logging.info("M2M100 model loaded successfully")
+
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    logging.info(f"Using device: {device}")
+
     model_m2m.to(device)
     m2m_available = True
+    logging.info("M2M100 model ready for use")
 except Exception as e:
     logging.error(f"Error loading M2M100 model: {e}")
     m2m_available = False
+    logging.info("Setting m2m_available to False")
+
 # Initialize ASR model
 model_id = "bishaltwr/wav2vec2-large-mms-1b-nepali"
 try: