Update app.py
app.py CHANGED
@@ -24,6 +24,9 @@ def stop_generation():
     return "Generation stopped."
 
 def CTXGen(X0, X1, X2, τ, g_num, model_name):
+    global is_stopped
+    is_stopped = False
+
     device = torch.device("cpu")
     vocab_mlm = create_vocab()
     vocab_mlm = add_tokens_to_vocab(vocab_mlm)
@@ -33,9 +36,9 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
     model = torch.load(save_path, map_location=torch.device('cpu'))
     model = model.to(device)
 
-
-    is_stopped = False
+
     X3 = "X" * len(X0)
+    print(X3)
     msa_data = pd.read_csv('conoData_C0.csv')
     msa = msa_data['Sequences'].tolist()
     msa = [x for x in msa if x.startswith(f"{X1}|{X2}")]
@@ -93,7 +96,6 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
     if time.time() - start_time > 1200:
         break
 
-    gen_len = len(X0)
     seq = [f"{X1}|{X2}|{X3}|{X4}|{X5}|{X6}"]
     vocab_mlm.token_to_idx["X"] = 4
 
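
For context, the change shares a module-level is_stopped flag between stop_generation() and CTXGen(): stop_generation() sets it, and CTXGen() now declares it global and resets it at the start of each run. A minimal sketch of that pattern, assuming a generation loop inside CTXGen() polls the flag (the loop body and the Stop-button wiring are not part of this diff), looks like this:

    # Minimal sketch of the module-level stop-flag pattern (assumed usage).
    # Only stop_generation() and the global/is_stopped lines appear in the diff;
    # the polling loop below is an illustrative assumption.
    import time

    is_stopped = False

    def stop_generation():
        global is_stopped
        is_stopped = True          # e.g. triggered by a "Stop" button callback
        return "Generation stopped."

    def CTXGen(X0, X1, X2, τ, g_num, model_name):
        global is_stopped
        is_stopped = False         # reset the flag at the start of each run

        start_time = time.time()
        while not is_stopped:
            # ... one generation step per iteration (assumed) ...
            if time.time() - start_time > 1200:   # 20-minute cap, as in the diff
                break

Without the global declaration, the assignment inside CTXGen() would create a function-local variable, so a later call to stop_generation() would update the module-level flag without affecting the value the running loop reads.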