Spaces:
Running
Running
add upgrade model version
Browse files
pages/upload.py
CHANGED
@@ -79,7 +79,7 @@ with st.container():
|
|
79 |
|
80 |
with col2:
|
81 |
st.markdown('<p style="font-size:16px; font-weight:bold; margin-bottom:4px;">🌐 Ngôn ngữ muốn dịch sang</p>', unsafe_allow_html=True)
|
82 |
-
target_lang = st.selectbox(" ", ["
|
83 |
|
84 |
# Xử lý file trực tiếp
|
85 |
def process_file(file, file_type):
|
|
|
79 |
|
80 |
with col2:
|
81 |
st.markdown('<p style="font-size:16px; font-weight:bold; margin-bottom:4px;">🌐 Ngôn ngữ muốn dịch sang</p>', unsafe_allow_html=True)
|
82 |
+
target_lang = st.selectbox(" ", ["vietnamese", "chinese", "english", "japanese"], key="target_lang")
|
83 |
|
84 |
# Xử lý file trực tiếp
|
85 |
def process_file(file, file_type):
|
translate/__pycache__/translator.cpython-310.pyc
CHANGED
Binary files a/translate/__pycache__/translator.cpython-310.pyc and b/translate/__pycache__/translator.cpython-310.pyc differ
|
|
translate/translator.py
CHANGED
@@ -21,7 +21,7 @@ def translate_text_dict(text_dict: Dict[str, List[str]], source_lang: str = "vi
|
|
21 |
Return the translated texts formatted like the original dictionary. Do NOT say anthing else. Return it as a JSON block."""
|
22 |
|
23 |
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
|
24 |
-
model = genai.GenerativeModel("gemini-2.0-flash")
|
25 |
|
26 |
response = model.generate_content(prompt) # Use a model appropriate for your needs and API key. gemini-2.0-flash doesn't exist. 1.5-pro is a good general-purpose model.
|
27 |
|
|
|
21 |
Return the translated texts formatted like the original dictionary. Do NOT say anthing else. Return it as a JSON block."""
|
22 |
|
23 |
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
|
24 |
+
model = genai.GenerativeModel("gemini-2.5-pro")
|
25 |
|
26 |
response = model.generate_content(prompt) # Use a model appropriate for your needs and API key. gemini-2.0-flash doesn't exist. 1.5-pro is a good general-purpose model.
|
27 |
|
utils/__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/utils/__pycache__/utils.cpython-310.pyc and b/utils/__pycache__/utils.cpython-310.pyc differ
|
|
utils/utils.py
CHANGED
@@ -34,7 +34,7 @@ def translate_single_text(text: str, source_lang: str = 'English', target_lang:
|
|
34 |
retries = 0
|
35 |
while retries <= max_retries:
|
36 |
try:
|
37 |
-
model = genai.GenerativeModel('gemini-2.0-flash')
|
38 |
|
39 |
system_prompt = f"""You are a translation engine.
|
40 |
Translate the following text accurately to {target_lang}.
|
@@ -141,7 +141,7 @@ def translate_text(text_dict, source_lang='English', target_lang="Vietnamese", m
|
|
141 |
retry_count = 0
|
142 |
while retry_count < max_retries:
|
143 |
try:
|
144 |
-
model = genai.GenerativeModel('gemini-2.0-flash')
|
145 |
full_prompt = f"{system_prompt.strip()}\n\n{user_prompt.strip()}"
|
146 |
|
147 |
response = model.generate_content(
|
|
|
34 |
retries = 0
|
35 |
while retries <= max_retries:
|
36 |
try:
|
37 |
+
model = genai.GenerativeModel('gemini-2.5-pro') # hoặc 'gemini-1.5-flash'
|
38 |
|
39 |
system_prompt = f"""You are a translation engine.
|
40 |
Translate the following text accurately to {target_lang}.
|
|
|
141 |
retry_count = 0
|
142 |
while retry_count < max_retries:
|
143 |
try:
|
144 |
+
model = genai.GenerativeModel('gemini-2.5-pro')
|
145 |
full_prompt = f"{system_prompt.strip()}\n\n{user_prompt.strip()}"
|
146 |
|
147 |
response = model.generate_content(
|
word/__pycache__/word_helper.cpython-310.pyc
CHANGED
Binary files a/word/__pycache__/word_helper.cpython-310.pyc and b/word/__pycache__/word_helper.cpython-310.pyc differ
|
|
word/word_helper.py
CHANGED
@@ -36,7 +36,7 @@ def batch_translate(texts, source_lang = 'English', target_lang="Vietnamese"):
|
|
36 |
json_data = json.dumps({i: t for i, t in enumerate(texts)})
|
37 |
user_prompt = f"Target language: {target_lang}. JSON file: {json_data}"
|
38 |
|
39 |
-
model = genai.GenerativeModel('gemini-2.0-flash')
|
40 |
response = model.generate_content(contents = system_prompt.strip() + "\n" + user_prompt.strip(), generation_config={
|
41 |
'temperature': 0.3, # Adjust temperature for desired creativity
|
42 |
'top_p': 1,
|
@@ -89,7 +89,7 @@ def batch_translate_loop(batch, source_lang, target_lang):
|
|
89 |
print(len(batch), len(translated_batch))
|
90 |
return translated_batch
|
91 |
|
92 |
-
def get_batches(texts, limit = 1000):
|
93 |
batches = []
|
94 |
batch = []
|
95 |
word_count = 0
|
@@ -108,7 +108,7 @@ def get_batches(texts, limit = 1000):
|
|
108 |
|
109 |
def full_translate(texts, source_lang = 'English', target_lang="Vietnamese"):
|
110 |
full_translated_texts = []
|
111 |
-
batches = get_batches(texts, limit = 1000)
|
112 |
word_count = 0
|
113 |
global time_spent_sleeping
|
114 |
|
|
|
36 |
json_data = json.dumps({i: t for i, t in enumerate(texts)})
|
37 |
user_prompt = f"Target language: {target_lang}. JSON file: {json_data}"
|
38 |
|
39 |
+
model = genai.GenerativeModel('gemini-2.5-pro')
|
40 |
response = model.generate_content(contents = system_prompt.strip() + "\n" + user_prompt.strip(), generation_config={
|
41 |
'temperature': 0.3, # Adjust temperature for desired creativity
|
42 |
'top_p': 1,
|
|
|
89 |
print(len(batch), len(translated_batch))
|
90 |
return translated_batch
|
91 |
|
92 |
+
def get_batches(texts, limit = 2000):
|
93 |
batches = []
|
94 |
batch = []
|
95 |
word_count = 0
|
|
|
108 |
|
109 |
def full_translate(texts, source_lang = 'English', target_lang="Vietnamese"):
|
110 |
full_translated_texts = []
|
111 |
+
batches = get_batches(texts, limit = 2000)
|
112 |
word_count = 0
|
113 |
global time_spent_sleeping
|
114 |
|