# app.py
import gradio as gr
from hf_model_adapter import HFLocalModelAdapter
import os
# You can change this default to any HF model you prefer that is public or available to your account.
# Default model; override with the HF_MODEL environment variable to use any
# public HF model (or one available to your account).
DEFAULT_MODEL = os.environ.get("HF_MODEL", "stabilityai/stablelm-3b-4e1t")

# Load the adapter once at import time. A failed load leaves the app running
# so the UI can surface the error instead of crashing on startup.
model_load_error = None
try:
    hf_adapter = HFLocalModelAdapter(model_name=DEFAULT_MODEL)
except Exception as e:
    hf_adapter = None
    model_load_error = str(e)
def radio_agents_pipeline(user_message):
    """Run a Writer -> Editor -> QA chain over a single local HF model.

    Each stage's output becomes the next stage's input; the combined
    transcript of all three stages is returned as one Markdown string.

    Args:
        user_message: The topic/brief typed by the user.

    Returns:
        A Markdown-formatted string containing the writer draft, the
        edited copy, and the QA verdict — or an error message when the
        model failed to load or generation raised.
    """
    if hf_adapter is None:
        return f"[Model not loaded] {model_load_error or 'unknown error'}"

    # Generation can fail at runtime (OOM, tokenizer issues, etc.); report
    # the error to the UI instead of letting the exception crash the event
    # handler, mirroring the load-time error handling above.
    try:
        # Writer: produce the first draft from the user's brief.
        writer_out = hf_adapter.generate(
            "You are a radio script writer. Draft a short radio segment script based on: "
            + user_message,
            max_new_tokens=400,
        )
        # Editor: tighten the draft for on-air delivery.
        edited_out = hf_adapter.generate(
            "You are an editor. Improve clarity, shorten sentences, and make it radio-friendly.\n\n"
            + writer_out,
            max_new_tokens=300,
        )
        # QA: compliance pass over the edited copy.
        qa_out = hf_adapter.generate(
            "You are a broadcast compliance QA. Check for profanity, disallowed statements, "
            "or anything requiring human review. Reply with 'OK' if fine, otherwise list issues.\n\n"
            + edited_out,
            max_new_tokens=150,
        )
    except Exception as e:
        return f"[Generation failed] {e}"

    # Stitch the three stage outputs into one Markdown transcript.
    final_script = (
        "📜 **Draft (Writer):**\n"
        + writer_out
        + "\n\n✂️ **Edited (Editor):**\n"
        + edited_out
        + "\n\n✅ **QA Result (QA):**\n"
        + qa_out
    )
    return final_script
with gr.Blocks() as demo:
    gr.Markdown("# 🎙️ AutoGen-style Radio Content Creator (Gradio)")

    # Surface a load-time failure prominently rather than failing silently.
    if model_load_error:
        gr.Markdown(
            f"**Model load error:** `{model_load_error}`\n\nSet `HF_MODEL` env var or check logs."
        )

    chatbot = gr.Chatbot(elem_id="chatbot", height=600)

    with gr.Row():
        txt = gr.Textbox(
            label="Enter your prompt (e.g., '2-min morning show script about local news')",
            lines=2,
        )
        btn = gr.Button("Send")

    def user_submit(prompt, chat_history):
        # Ignore empty / whitespace-only input; always clear the textbox.
        if not prompt or not prompt.strip():
            return "", chat_history
        reply = radio_agents_pipeline(prompt)
        chat_history.append(("You: " + prompt, "Assistant:\n" + reply))
        return "", chat_history

    # Both Enter-in-textbox and the Send button trigger the same handler.
    txt.submit(user_submit, [txt, chatbot], [txt, chatbot])
    btn.click(user_submit, [txt, chatbot], [txt, chatbot])

    def clear_chat():
        # An empty list resets the Chatbot history.
        return []

    gr.Button("Clear Chat").click(clear_chat, None, chatbot)
if __name__ == "__main__":
    # Bind to all interfaces on Gradio's default port so the app is
    # reachable inside containers. (Removed a stray trailing "|" artifact
    # that made this line a syntax error.)
    demo.launch(server_name="0.0.0.0", server_port=7860)