import os

import gradio as gr

from hf_model_adapter import HFLocalModelAdapter

DEFAULT_MODEL = os.environ.get("HF_MODEL", "stabilityai/stablelm-3b-4e1t")
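
# Note: this app assumes HFLocalModelAdapter (defined in hf_model_adapter.py, not
# shown here) exposes a generate(prompt, max_new_tokens=...) -> str method, which
# is how it is called below.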

# Load the model once at startup; keep any error around so the UI can report it
# instead of crashing.
try:
    hf_adapter = HFLocalModelAdapter(model_name=DEFAULT_MODEL)
except Exception as e:
    hf_adapter = None
    model_load_error = str(e)
else:
    model_load_error = None


def radio_agents_pipeline(user_message):
    """
    Simple Writer -> Editor -> QA flow using one HF model via the adapter.

    Returns the combined multi-stage output.
    """
    if hf_adapter is None:
        return f"[Model not loaded] {model_load_error or 'unknown error'}"

    # Stage 1: Writer drafts the segment.
    writer_prompt = (
        "You are a radio script writer. Draft a short radio segment script based on: "
        + user_message
    )
    writer_out = hf_adapter.generate(writer_prompt, max_new_tokens=400)

    # Stage 2: Editor tightens the draft for broadcast.
    editor_prompt = (
        "You are an editor. Improve clarity, shorten sentences, and make it radio-friendly.\n\n"
        + writer_out
    )
    edited_out = hf_adapter.generate(editor_prompt, max_new_tokens=300)

    # Stage 3: QA checks the edited script for compliance issues.
    qa_prompt = (
        "You are a broadcast compliance QA. Check for profanity, disallowed statements, "
        "or anything requiring human review. Reply with 'OK' if fine, otherwise list issues.\n\n"
        + edited_out
    )
    qa_out = hf_adapter.generate(qa_prompt, max_new_tokens=150)

    # Combine all three stages into a single Markdown response.
    final_script = (
        "📝 **Draft (Writer):**\n"
        + writer_out
        + "\n\n✏️ **Edited (Editor):**\n"
        + edited_out
        + "\n\n✅ **QA Result (QA):**\n"
        + qa_out
    )
    return final_script
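
# Example usage outside the UI (assuming the model loaded successfully):
#   print(radio_agents_pipeline("2-min morning show script about local news"))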


with gr.Blocks() as demo:
    gr.Markdown("# 🎙️ AutoGen-style Radio Content Creator (Gradio)")
    if model_load_error:
        gr.Markdown(f"**Model load error:** `{model_load_error}`\n\nSet the `HF_MODEL` env var or check the logs.")
    chatbot = gr.Chatbot(elem_id="chatbot", height=600)
    with gr.Row():
        txt = gr.Textbox(label="Enter your prompt (e.g., '2-min morning show script about local news')", lines=2)
        btn = gr.Button("Send")

    def user_submit(prompt, chat_history):
        # Ignore empty prompts; otherwise run the Writer -> Editor -> QA pipeline
        # and append the exchange to the chat history.
        if not prompt or not prompt.strip():
            return "", chat_history
        # Defensive: treat a missing history value as an empty list.
        chat_history = chat_history or []
        response = radio_agents_pipeline(prompt)
        chat_history.append(("You: " + prompt, "Assistant:\n" + response))
        return "", chat_history

    txt.submit(user_submit, [txt, chatbot], [txt, chatbot])
    btn.click(user_submit, [txt, chatbot], [txt, chatbot])

    def clear_chat():
        return []

    gr.Button("Clear Chat").click(clear_chat, None, chatbot)


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
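
# Optional: calling demo.queue() before launch() enables Gradio's request queue,
# which can help when local generation is slow.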