Alea Ddine committed on
Commit
ed107fd
·
1 Parent(s): 2bc512b

Fix target_lang error, update requirements.txt for MacOS compatibility, use eager attention

Browse files
Files changed (2) hide show
  1. app.py +3 -3
  2. requirements.txt +15 -5
app.py CHANGED
@@ -121,7 +121,7 @@ def translate_text_advanced(text, target_language, source_language="auto", style
121
  use_memory=True, custom_glossary=None, batch_mode=False):
122
  if not text.strip():
123
  return "⚠️ Please enter text to translate", 0, ""
124
- if target_language == "Select Language":
125
  return "⚠️ Please select the target language", 0, ""
126
 
127
  try:
@@ -154,7 +154,7 @@ def translate_text_advanced(text, target_language, source_language="auto", style
154
  with torch.no_grad():
155
  outputs = model.generate(
156
  inputs,
157
- max_new_tokens=4096, # يدعم نصوص طويلة كما طلبت
158
  temperature=style_config["temperature"],
159
  top_p=0.9,
160
  top_k=10,
@@ -186,7 +186,7 @@ def translate_text_advanced(text, target_language, source_language="auto", style
186
  "quality": quality_score
187
  })
188
 
189
- log_translation(source_language, target_lang, len(text), processing_time, quality_score, style)
190
 
191
  stats = f"""
192
  🎯 Translation Quality: {quality_score:.1f}%
 
121
  use_memory=True, custom_glossary=None, batch_mode=False):
122
  if not text.strip():
123
  return "⚠️ Please enter text to translate", 0, ""
124
+ if not target_language or target_language == "Select Language":
125
  return "⚠️ Please select the target language", 0, ""
126
 
127
  try:
 
154
  with torch.no_grad():
155
  outputs = model.generate(
156
  inputs,
157
+ max_new_tokens=4096,
158
  temperature=style_config["temperature"],
159
  top_p=0.9,
160
  top_k=10,
 
186
  "quality": quality_score
187
  })
188
 
189
+ log_translation(source_language, target_language, len(text), processing_time, quality_score, style)
190
 
191
  stats = f"""
192
  🎯 Translation Quality: {quality_score:.1f}%
requirements.txt CHANGED
@@ -1,7 +1,17 @@
1
  torch>=2.1.0
2
  transformers>=4.56.0
3
- gradio>=4.0.0
4
- accelerate
5
- bitsandbytes
6
- spaces
7
- flash-attention # For flash_attention_2 support
 
 
 
 
 
 
 
 
 
 
 
1
  torch>=2.1.0
2
  transformers>=4.56.0
3
+ gradio>=4.44.0
4
+ accelerate>=0.25.0
5
+ bitsandbytes>=0.41.0
6
+ numpy>=1.24.0
7
+ sentencepiece>=0.1.99
8
+ protobuf>=3.20.0
9
+ scipy>=1.10.0
10
+ safetensors>=0.4.0
11
+ psutil>=5.9.0
12
+ aiofiles>=23.0.0
13
+ pydantic>=2.0.0
14
+ python-dateutil>=2.8.2
15
+ colorama>=0.4.6
16
+ tqdm>=4.66.0
17
+ huggingface-hub>=0.19.0