dfsfd
- app.py +33 -13
- requirements.txt +3 -6
app.py
CHANGED
@@ -19,23 +19,43 @@ before = datetime.datetime.now()
 # tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-1.5-6B-Chat")
 # model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-1.5-6B-Chat")
 
-import
-
-inputs = tokenizer(
-
-st.write('gerando a saida...')
-outputs = model(inputs)
-
-last_hidden_states = outputs.last_hidden_state
-
-output = last_hidden_states
+# Load model directly
+from transformers import AutoTokenizer, Phi3ForCausalLM
+
+model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
+tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
+
+prompt = "Qual é o maior planeta do sistema solar ?"
+inputs = tokenizer(prompt, return_tensors="pt")
+
+# Generate
+generate_ids = model.generate(inputs.input_ids, max_length=30)
+output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+st.write(output)
+
+# tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
+# model = TFRobertaModel.from_pretrained("FacebookAI/roberta-base")
+
+# st.write('tokenizando...')
+# inputs = tokenizer(question, return_tensors="tf")
+
+# st.write('gerando a saida...')
+# outputs = model(inputs)
+
+# last_hidden_states = outputs.last_hidden_state
+
+# output = last_hidden_states
+
+# st.write(output)
 
 # st.write('tokenizando...')
@@ -118,8 +138,8 @@ print('saida gerada.')
 # answer = 'A: ' + answer
 
 print('\n\n')
-print(question)
-print(response)
+# print(question)
+# print(response)
 
 after = datetime.datetime.now()
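Note on the new generation call (the prompt asks, in Portuguese, "What is the largest planet in the solar system?"): `max_length=30` budgets prompt and completion together, so most of those 30 tokens are spent re-encoding the prompt and the visible answer can come out truncated. The sketch below is not part of this commit; it shows the same checkpoint driven through two stock transformers features, `max_new_tokens` (bounds only the completion) and `apply_chat_template` (formats the prompt the way the instruct checkpoint expects).

# Sketch only, not part of this commit: same model, but the token budget
# applies to the answer alone, and the prompt goes through the chat
# template the instruct checkpoint was tuned for.
from transformers import AutoTokenizer, Phi3ForCausalLM

model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")

messages = [{"role": "user", "content": "Qual é o maior planeta do sistema solar ?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)

generate_ids = model.generate(input_ids, max_new_tokens=30)
# Decode only the newly generated tokens, skipping the echoed prompt.
answer = tokenizer.batch_decode(
    generate_ids[:, input_ids.shape[-1]:], skip_special_tokens=True
)[0]
print(answer)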
requirements.txt
CHANGED
@@ -1,8 +1,5 @@
-
-
-
-transformers==4.44.0
-optimum
-auto_gptq==0.5.0
+torch
+streamlit
+transformers
 
 
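The new requirements track the rewritten app.py: `torch` backs the `return_tensors="pt"` path, `streamlit` provides `st.write`, and `transformers` is now unpinned (the dropped `optimum` and `auto_gptq` entries belonged to the earlier setup). A quick sanity sketch, not part of this commit, assuming only these three packages:

# Sanity sketch: confirm the three unpinned dependencies import together
# and that the resolved transformers release is recent enough for Phi-3.
import streamlit
import torch
import transformers
from transformers import Phi3ForCausalLM  # fails on releases predating Phi-3 support

print("torch", torch.__version__)
print("streamlit", streamlit.__version__)
print("transformers", transformers.__version__)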