from huggingface_hub import InferenceClient, HfApi, upload_file
import datetime
import gradio as gr
import random
import prompts
import json
import uuid
import os
# --- Module-level configuration and shared mutable state ---

# Hugging Face access token, read from the environment for authenticated calls.
token = os.environ.get("HF_TOKEN")
username = "omnibus"
dataset_name = "tmp"
# Bug fix: HfApi was constructed with token="" (an empty string), so any
# authenticated Hub operation would fail even though HF_TOKEN was read above.
api = HfApi(token=token)
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Shared state mutated by the generation helpers below.
history = []
hist_out = []
summary = [""]     # summary[0] holds the running compressed history
main_point = [""]  # main_point[0] holds the current focus/topic
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from prior chat turns plus a new message.

    Every (user, bot) pair in *history* is rendered as
    "[INST] user [/INST] bot ", and *message* is appended as a final,
    unanswered "[INST] ... [/INST]" for the model to complete.
    """
    turns = [
        f"[INST] {user_prompt} [/INST] {bot_response} "
        for user_prompt, bot_response in history
    ]
    turns.append(f"[INST] {message} [/INST]")
    return "".join(turns)
# Names of the prompt templates defined in the `prompts` module.
agents = [
    "COMMENTER",
    "BLOG_POSTER",
    "REPLY_TO_COMMENTER",
    "COMPRESS_HISTORY_PROMPT",
]

# Default generation hyperparameters.
temperature = 0.9
max_new_tokens = 256
max_new_tokens2 = 10480
top_p = 0.95
# Bug fix: a trailing comma previously made this the one-element tuple (1.0,)
# instead of the float 1.0, which would be rejected as a repetition penalty.
repetition_penalty = 1.0
def compress_history(formatted_prompt):
    """Stream a history-compression completion for an already-formatted prompt.

    Sends *formatted_prompt* verbatim to the model and accumulates the
    streamed tokens into one string, which is printed and returned.

    Fixes over the original: removed the no-op `formatted_prompt =
    formatted_prompt` assignment, the unreachable temperature clamp
    (temperature was hardcoded to 0.9 immediately before the `< 1e-2`
    check), and the commented-out dead code.
    """
    seed = random.randint(1, 1111111111111111)
    # NOTE(review): this system prompt is built from the module-level
    # summary/main_point state but is never included in the request —
    # preserved as-is; confirm whether it should be prepended to the prompt.
    system_prompt = prompts.COMPRESS_HISTORY_PROMPT.format(
        history=summary[0], focus=main_point[0]
    )
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=30480,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
    print(output)
    print(main_point[0])
    return output
def question_generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,):
    """Generate a commenter-style response to *prompt*.

    The COMMENTER template (focused on main_point[0]) is used as the system
    prompt; the model output is streamed and returned as one string.
    `agent_name` and `sys_prompt` are accepted for interface compatibility
    but are not used.
    """
    system_prompt = prompts.COMMENTER.format(focus=main_point[0])
    # Clamp temperature away from zero; sampling rejects values below 1e-2.
    temperature = max(float(temperature), 1e-2)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(1, 1111111111111111),
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    pieces = []
    for response in stream:
        pieces.append(response.token.text)
    return "".join(pieces)
def blog_poster_reply(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,):
    """Generate the blog author's reply to a commenter.

    The REPLY_TO_COMMENTER template (focused on main_point[0]) is used as
    the system prompt; the model output is streamed and returned as one
    string. `agent_name` and `sys_prompt` are accepted for interface
    compatibility but are not used.
    """
    system_prompt = prompts.REPLY_TO_COMMENTER.format(focus=main_point[0])
    # Clamp temperature away from zero; sampling rejects values below 1e-2.
    temperature = max(float(temperature), 1e-2)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(1, 1111111111111111),
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    pieces = []
    for response in stream:
        pieces.append(response.token.text)
    return "".join(pieces)
def create_valid_filename(invalid_filename: str) -> str:
    """Convert a string into a filename-safe form.

    Runs of whitespace are collapsed into single hyphens, then every
    character outside [A-Za-z0-9_-] is dropped.

    Bug fix: the original called `invalid_filename.replace(" ", "-")`
    without using the result (strings are immutable), leaving a dead
    statement; it has been removed. The allowed-character tuple is now a
    set for O(1) membership tests.
    """
    hyphenated = "-".join(invalid_filename.split())
    allowed = set(
        "abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "0123456789_-"
    )
    return "".join(char for char in hyphenated if char in allowed)
def load_html(inp):
ht=""
if inp:
for ea in inp:
outp,prom=ea
ht+=f"""
{outp}
{prom}