Spaces:
Runtime error
Runtime error
iamspruce
committed on
Commit
·
8a2a7e9
1
Parent(s):
6540e3e
added project files
Browse files- Dockerfile +10 -0
- app/main.py +45 -0
- app/models.py +31 -0
- app/prompts.py +14 -0
- requirements.txt +8 -0
Dockerfile
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Slim Python 3.10 base image keeps the final image small.
FROM python:3.10-slim

# All subsequent paths are relative to /app inside the container.
WORKDIR /app

# Copy and install dependencies first so this layer is cached
# when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the project (app/ package, etc.) into the image.
COPY . .

# Serve the FastAPI app with uvicorn on all interfaces.
# NOTE: 7860 is the port Hugging Face Spaces expects a Space to listen on.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app/main.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# FastAPI entry point for the text-rewriting service.
# NOTE(review): `Body` appears unused in this file — confirm before removing.
from fastapi import FastAPI, Body
from pydantic import BaseModel
from app import models, prompts

# Application instance that uvicorn serves (see Dockerfile CMD: "app.main:app").
app = FastAPI()
|
6 |
+
|
7 |
+
class TextInput(BaseModel):
    """Request body for POST /rewrite.

    Attributes:
        text: The text to rewrite.
        mode: One of "grammar", "paraphrase", "clarity", "fluency",
              "tone", "translate", or "pronoun".
        tone: Target tone; only consulted when mode == "tone".
        target_lang: Target language code; only consulted when mode == "translate".
    """

    text: str
    mode: str
    # Fix: these fields default to None, so the annotation must be `str | None`
    # (the bare `str = None` annotation is wrong under mypy and pydantic v2).
    tone: str | None = None
    target_lang: str | None = None
|
12 |
+
|
13 |
+
@app.post("/rewrite")
def rewrite(input: TextInput):
    """Rewrite `input.text` according to `input.mode`.

    Modes:
        grammar            -> dedicated grammar-correction model
        paraphrase/clarity/fluency/pronoun -> prompt template + FLAN-T5
        tone (needs input.tone)            -> tone prompt + FLAN-T5
        translate (needs input.target_lang)-> Marian translation model

    Returns:
        {"result": <rewritten text>} on success, or
        {"error": "Invalid request"} for an unknown mode / missing argument
        (same 200-status error dict as before, so existing clients keep working).
    """
    text = input.text
    mode = input.mode

    # These four modes all follow the same "build prompt, run FLAN-T5" shape;
    # a dispatch table replaces the duplicated elif branches.
    flan_prompt_builders = {
        "paraphrase": prompts.paraphrase_prompt,
        "clarity": prompts.clarity_prompt,
        "fluency": prompts.fluency_prompt,
        "pronoun": prompts.pronoun_friendly_prompt,
    }

    if mode == "grammar":
        return {"result": models.run_grammar_correction(text)}

    if mode in flan_prompt_builders:
        prompt = flan_prompt_builders[mode](text)
        return {"result": models.run_flan_prompt(prompt)}

    if mode == "tone" and input.tone:
        prompt = prompts.tone_prompt(text, input.tone)
        return {"result": models.run_flan_prompt(prompt)}

    if mode == "translate" and input.target_lang:
        return {"result": models.run_translation(text, input.target_lang)}

    return {"error": "Invalid request"}
|
app/models.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch

# All inference runs on CPU (no GPU assumed in the container).
device = torch.device("cpu")

# Models are downloaded/loaded once at import time so requests don't pay the
# load cost; this makes the first import (container startup) slow.

# Grammar model: T5 fine-tuned for grammar correction (expects "fix: ..." input).
grammar_tokenizer = AutoTokenizer.from_pretrained("vennify/t5-base-grammar-correction")
grammar_model = AutoModelForSeq2SeqLM.from_pretrained("vennify/t5-base-grammar-correction").to(device)

# FLAN-T5 for all prompts (paraphrase, clarity, fluency, tone, pronoun rewrites).
flan_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
flan_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small").to(device)

# Translation model: Marian English -> Romance languages (multilingual; target
# language is selected with a ">>lang<<" prefix in the input text).
trans_tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ROMANCE")
trans_model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ROMANCE").to(device)
|
17 |
+
|
18 |
+
def run_grammar_correction(text: str) -> str:
    """Return a grammar-corrected version of *text*.

    Prefixes the input with "fix: " as the vennify/t5-base-grammar-correction
    model expects, generates on CPU, and decodes the first beam.
    """
    inputs = grammar_tokenizer(f"fix: {text}", return_tensors="pt").to(device)
    # Fix: generate() without a length argument inherits the transformers
    # default max_length=20, truncating corrections of longer sentences.
    # no_grad() avoids building autograd state during inference.
    with torch.no_grad():
        outputs = grammar_model.generate(**inputs, max_new_tokens=256)
    return grammar_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
22 |
+
|
23 |
+
def run_flan_prompt(prompt: str) -> str:
    """Run an instruction *prompt* through FLAN-T5 and return the decoded text.

    Used by every prompt-template mode (paraphrase, clarity, fluency, tone,
    pronoun rewrites).
    """
    inputs = flan_tokenizer(prompt, return_tensors="pt").to(device)
    # Fix: generate() without a length argument inherits the transformers
    # default max_length=20, truncating longer rewrites.
    with torch.no_grad():
        outputs = flan_model.generate(**inputs, max_new_tokens=256)
    return flan_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
27 |
+
|
28 |
+
def run_translation(text: str, target_lang: str) -> str:
    """Translate English *text* into *target_lang* with the Marian model.

    The opus-mt-en-ROMANCE checkpoint is multilingual; the target language is
    selected with a ">>lang<<" prefix on the source text (e.g. ">>fr<<").
    """
    inputs = trans_tokenizer(f">>{target_lang}<< {text}", return_tensors="pt").to(device)
    # Fix: generate() without a length argument inherits the transformers
    # default max_length=20, truncating longer translations.
    with torch.no_grad():
        outputs = trans_model.generate(**inputs, max_new_tokens=256)
    return trans_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
app/prompts.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def tone_prompt(text, tone):
    """Build the FLAN-T5 instruction asking for *text* rewritten in *tone*."""
    template = "Rewrite the following text in a {} tone: {}"
    return template.format(tone, text)
|
3 |
+
|
4 |
+
def clarity_prompt(text):
    """Build the FLAN-T5 instruction asking for a clearer version of *text*."""
    return "Make this clearer: " + text
|
6 |
+
|
7 |
+
def fluency_prompt(text):
    """Build the FLAN-T5 instruction asking for a more fluent *text*."""
    return "Improve the fluency of this sentence: " + text
|
9 |
+
|
10 |
+
def paraphrase_prompt(text):
    """Build the FLAN-T5 instruction asking for a paraphrase of *text*."""
    return "Paraphrase: " + text
|
12 |
+
|
13 |
+
def pronoun_friendly_prompt(text):
    """Build the FLAN-T5 instruction asking for an inclusive-pronoun rewrite of *text*."""
    return "Rewrite the text using inclusive and non-offensive pronouns: " + text
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn[standard]
|
3 |
+
transformers
|
4 |
+
torch
|
5 |
+
sentencepiece
|
6 |
+
pyspellchecker
|
7 |
+
spacy
|
8 |
+
nltk
|