import os
import gradio as gr
from dotenv import load_dotenv, find_dotenv
from predibase import Predibase

# Load secrets (PREDIBASE_API_KEY) from a local .env file if one is present.
_ = load_dotenv(find_dotenv())
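# A minimal sketch of the expected .env contents (the value below is a
# placeholder, not a real token):
#
#   PREDIBASE_API_KEY=pb_your_api_token_here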
# Get an API key from https://app.predibase.com/
api_token = os.getenv('PREDIBASE_API_KEY')
pb = Predibase(api_token=api_token)

# Fine-tuned LoRA adapter served on top of the base solar-1-mini-chat deployment.
adapter_id = 'tour-assistant-model/15'
lorax_client = pb.deployments.client("solar-1-mini-chat-240612")
def extract_json(gen_text, n_shot_learning=0):
    """Pull the JSON object out of a completion that uses '### Response:' blocks.

    n_shot_learning == -1 : start from the beginning of gen_text.
    n_shot_learning ==  0 : use the first '### Response:' block.
    n_shot_learning  >  0 : skip that many earlier response blocks first.
    """
    marker = "### Response:\n{"
    offset = len("### Response:\n")  # position of the opening '{'
    if n_shot_learning == -1:
        start_index = 0
    else:
        start_index = gen_text.index(marker) + offset
        if n_shot_learning > 0:
            for _ in range(n_shot_learning):
                gen_text = gen_text[start_index:]
                start_index = gen_text.index(marker) + offset
    end_index = gen_text.find("}\n\n### ") + 1  # include the closing '}'
    return gen_text[start_index:end_index]
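# Illustrative only -- the sample completion below is made up, not a real model
# output -- showing what extract_json() is meant to return:
#
#   extract_json('### Response:\n{"answer": "3%"}\n\n### Instruction: ...')
#   -> '{"answer": "3%"}'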
def get_completion(prompt):
    # Generate with the fine-tuned adapter applied on top of the base
    # deployment; .generated_text holds the completion string.
    return lorax_client.generate(
        prompt, adapter_id=adapter_id, max_new_tokens=1000
    ).generated_text
def greet(input):
    sys_str = "You are a helpful support assistant. Answer the following question."
    # Few-shot Q&A examples prepended to every request.
    qa_list = []
    n_prompt_list = []
    qa_list.append({
        "question": "What are the benefits of joining a union?",
        "answer": "Collective bargaining of salary."
    })
    qa_list.append({
        "question": "How much are union dues, and what do they cover?",
        "answer": "The union dues for our union are 3%."
    })
    qa_list.append({
        "question": "How does the union handle grievances and disputes?",
        "answer": "There will be a panel to oversee disputes."
    })
    qa_list.append({
        "question": "Will joining a union affect my job security?",
        "answer": "No."
    })
    qa_list.append({
        "question": "What is the process for joining a union?",
        "answer": "Please use the contact form."
    })
    # Additional examples, currently disabled:
    # qa_list.append({
    #     "question": "How do unions negotiate contracts with employers?",
    #     "answer": "Our dear leader will handle the negotiations."
    # })
    # qa_list.append({
    #     "question": "What role do I play as a union member?",
    #     "answer": "You will be invited to our monthly picnics."
    # })
    # qa_list.append({
    #     "question": "How do unions ensure that employers comply with agreements?",
    #     "answer": "We will have a monthly meeting for members."
    # })
    # qa_list.append({
    #     "question": "Can I be forced to join a union?",
    #     "answer": "What kind of question is that! Of course not!"
    # })
    # qa_list.append({
    #     "question": "What happens if I disagree with the union’s decisions?",
    #     "answer": "We will agree to disagree."
    # })
    # Render each example in the <|im_start|>role ... <|im_end|> chat format.
    for qna in qa_list:
        ques_str = qna["question"]
        ans_str = qna["answer"]
        n_prompt_list.append(f"""
<|im_start|>system\n{sys_str}<|im_end|>
<|im_start|>question\n{ques_str}<|im_end|>
<|im_start|>answer\n{ans_str}<|im_end|>
"""
        )
    # Concatenate the few-shot examples, then append the user's question with
    # an open "answer" turn for the model to complete.
    n_prompt_str = "\n"
    for prompt in n_prompt_list:
        n_prompt_str = n_prompt_str + prompt + "\n"
    total_prompt = f"""
{n_prompt_str}
<|im_start|>system\n{sys_str}<|im_end|>
<|im_start|>question
{input}\n<|im_end|>
<|im_start|>answer
"""
    print("***total_prompt:")
    print(total_prompt)
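    # For reference, the assembled prompt roughly looks like this (one
    # system/question/answer block per few-shot example, followed by the
    # user's question and an open answer turn):
    #
    #   <|im_start|>system ... <|im_end|>
    #   <|im_start|>question ... <|im_end|>
    #   <|im_start|>answer ... <|im_end|>
    #   ...
    #   <|im_start|>system ... <|im_end|>
    #   <|im_start|>question
    #   {input}
    #   <|im_end|>
    #   <|im_start|>answer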
    response = get_completion(total_prompt)
    # The deployment returns plain text, so it is passed straight to the UI;
    # extract_json() above is only needed for completions that use the
    # '### Response:' format.
    return response
# Simple text-in / text-out Gradio UI around greet().
iface = gr.Interface(fn=greet, inputs=[gr.Textbox(label="Question", lines=3)], outputs="text")
iface.queue(api_open=True)
iface.launch()
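# A minimal sketch of calling this app remotely once it is running, assuming
# the gradio_client package is installed and "user/space-name" is replaced
# with the actual Space id (placeholder, not the real name):
#
#   from gradio_client import Client
#   client = Client("user/space-name")
#   result = client.predict("What are the benefits of joining a union?",
#                           api_name="/predict")
#   print(result)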