Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,23 +1,25 @@
-import os
-import gradio as gr
+from flask import Flask, request, jsonify
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-
-os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
+app = Flask(__name__)
 
 model_id = "memorease/base-t5-v2"
 tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
 
-
-def
+@app.route("/ask", methods=["POST"])
+def ask_question():
+    data = request.json
+    memory = data["text"]
+
     instruction = "Generate a question based on the given memory."
     prompt = instruction + " " + memory
+
     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
     outputs = model.generate(**inputs, max_new_tokens=64)
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    return result
-
-
-
+    return jsonify(question=result)
+
+if __name__ == "__main__":
+    app.run(host="0.0.0.0", port=7860)
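After this commit, app.py exposes a single POST endpoint, /ask, which reads a memory string from the "text" field of the JSON body and returns the generated question as {"question": ...}. A minimal client sketch, assuming the app is reachable on localhost:7860 as configured in app.run above (a deployed Space would be called through its own public URL instead):

import requests

# Example memory; the endpoint expects a JSON body with a "text" field.
payload = {"text": "I visited the science museum with my brother last weekend."}

# Hypothetical local URL; swap in the Space's endpoint once it is deployed.
resp = requests.post("http://localhost:7860/ask", json=payload)
print(resp.json())  # e.g. {"question": "..."}

Since ask_question reads data["text"] directly, a request without that field will raise a KeyError and return a 500 response, so clients should always send the "text" key.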