from huggingface_hub import InferenceClient
import gradio as gr
import random
import prompts

client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
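# The local `prompts` module is not shown on this page. A minimal sketch of
# what it is assumed to contain — one system-prompt string per agent name,
# with the exact wording here being hypothetical:
#
#   WEB_DEV_SYSTEM_PROMPT = "You are an expert web developer. ..."
#   AI_SYSTEM_PROMPT = "You write system prompts that align AI agents. ..."
#   PYTHON_CODE_DEV = "You are an expert Python developer. ..."
#   # ...and likewise for the other names in the `agents` list below.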
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
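# Example of the Mixtral instruct format this produces: for
# history = [("Hi", "Hello!")] and message = "Write a haiku", the result is
#   "<s>[INST] Hi [/INST] Hello!</s> [INST] Write a haiku [/INST]"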
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
    "CODE_REVIEW_ASSISTANT",
    "CONTENT_WRITER_EDITOR",
    "SOCIAL_MEDIA_MANAGER",
    "MEME_GENERATOR",
    "QUESTION_GENERATOR",
    "IMAGE_GENERATOR",
    "HUGGINGFACE_FILE_DEV",
]
def generate(
    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9,
    max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    # Fresh seed per request so repeated generations differ.
    seed = random.randint(1, 1111111111111111)
    # Resolve the selected agent to its system prompt; fall back to the
    # web-dev prompt for unknown names.
    agent_prompts = {
        "WEB_DEV": prompts.WEB_DEV_SYSTEM_PROMPT,
        "AI_SYSTEM_PROMPT": prompts.AI_SYSTEM_PROMPT,
        "PYTHON_CODE_DEV": prompts.PYTHON_CODE_DEV,
        "CODE_REVIEW_ASSISTANT": prompts.CODE_REVIEW_ASSISTANT,
        "CONTENT_WRITER_EDITOR": prompts.CONTENT_WRITER_EDITOR,
        "SOCIAL_MEDIA_MANAGER": prompts.SOCIAL_MEDIA_MANAGER,
        "MEME_GENERATOR": prompts.MEME_GENERATOR,
        "QUESTION_GENERATOR": prompts.QUESTION_GENERATOR,
        "IMAGE_GENERATOR": prompts.IMAGE_GENERATOR,
        "HUGGINGFACE_FILE_DEV": prompts.HUGGINGFACE_FILE_DEV,
    }
    # Note: `sys_prompt` (the System Prompt textbox) is accepted but unused;
    # the selected agent's prompt is what gets prepended below.
    system_prompt = agent_prompts.get(agent_name, prompts.WEB_DEV_SYSTEM_PROMPT)

    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # the endpoint requires a strictly positive temperature
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )
    # Prepend the agent's system prompt to the user message, then stream
    # tokens back so the chat UI updates incrementally.
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
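# A minimal sketch of driving `generate` outside the UI (the Space itself
# only calls it through gr.ChatInterface below):
#
#   final = ""
#   for partial in generate("Write a haiku about Gradio", history=[]):
#       final = partial  # each yield is the accumulated text so far
#   print(final)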
additional_inputs = [
    gr.Dropdown(
        label="Agents",
        choices=agents,
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1024 * 10,
        minimum=0,
        maximum=1024 * 10,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
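# gr.ChatInterface passes these six controls, in order, as the arguments of
# `generate` after (prompt, history): agent_name, sys_prompt, temperature,
# max_new_tokens, top_p, repetition_penalty.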
examples = [
    ["Write a simple working game in HTML5", agents[0], None, None, None, None],
    ["Choose 3 useful types of AI agents, and create a detailed system prompt to align each of them.", agents[1], None, None, None, None],
    ["Create 3 of the funniest memes", agents[6], None, None, None, None],
    ["Explain to me, as a children's story, how nuclear fission works", agents[4], None, None, None, None],
    ["Show a bunch of examples of catchy ways to post 'I had a ham sandwich for lunch today'", agents[5], None, None, None, None],
    ["Write a high-quality personal website to show off my adventure-sports hobby", agents[0], None, None, None, None],
    ["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", agents[4], None, None, None, None],
    ["Can you write a short story about a time-traveling detective who solves historical mysteries?", agents[4], None, None, None, None],
    ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", agents[4], None, None, None, None],
    ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", agents[4], None, None, None, None],
    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", agents[2], None, None, None, None],
    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", agents[3], None, None, None, None],
]
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,
    title="Mixtral 46.7B",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
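# In Gradio 4.x, concurrency_limit=20 allows up to 20 simultaneous `generate`
# calls on the request queue; show_api=False hides the auto-generated API page.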