memorease committed on
Commit
5f4e213
·
verified ·
1 Parent(s): b1931b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -1,9 +1,13 @@
1
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
2
  from flask import Flask, request, jsonify
 
 
 
 
3
 
4
  app = Flask(__name__)
5
 
6
- # Model yükleniyor
7
  model_id = "memorease/fine_tuned_flan_v2"
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
9
  model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
@@ -13,10 +17,8 @@ def generate_question():
13
  data = request.json
14
  memory = data.get("memory", "")
15
  instruction = data.get("instruction", "Generate a question based on the given memory.")
16
-
17
  prompt = instruction + " " + memory
18
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
19
  outputs = model.generate(**inputs, max_new_tokens=64)
20
-
21
- question = tokenizer.decode(outputs[0], skip_special_tokens=True)
22
- return jsonify({"question": question})
 
import os

# Redirect the Hugging Face cache to a writable temp dir (the default cache
# path is read-only on some hosts, e.g. Spaces containers).
# NOTE: this MUST run before `import transformers` — transformers resolves its
# cache directory at import time, so setting the variable afterwards (as the
# previous revision did) has no effect.
# NOTE(review): TRANSFORMERS_CACHE is deprecated in recent transformers
# releases in favour of HF_HOME — confirm the installed version honours it.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"

from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

app = Flask(__name__)

# Load the fine-tuned FLAN question-generation model once at startup so
# request handlers reuse the same tokenizer/model instances.
model_id = "memorease/fine_tuned_flan_v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
 
17
  data = request.json
18
  memory = data.get("memory", "")
19
  instruction = data.get("instruction", "Generate a question based on the given memory.")
 
20
  prompt = instruction + " " + memory
21
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
22
  outputs = model.generate(**inputs, max_new_tokens=64)
23
+ result = tokenizer.decode(outputs[0], skip_special_tokens=True)
24
+ return jsonify({"question": result})