prakhardoneria committed on
Commit
2223fe4
·
verified ·
1 Parent(s): 1c98a01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -36
app.py CHANGED
@@ -1,39 +1,53 @@
1
  import gradio as gr
2
- from llama_cpp import Llama
3
- from huggingface_hub import hf_hub_download
4
-
5
- # Lazy global model
6
- llm = None
7
-
8
- def load_model():
9
- global llm
10
- if llm is None:
11
- # Download GGUF model from HF Hub
12
- model_path = hf_hub_download(
13
- repo_id="TheBloke/deepseek-coder-1.3b-instruct-GGUF",
14
- filename="deepseek-coder-1.3b-instruct.Q4_K_M.gguf"
15
- )
16
- # Load LLaMA model
17
- llm = Llama(
18
- model_path=model_path,
19
- n_ctx=1024,
20
- n_threads=4,
21
- n_gpu_layers=0, # Set 0 for CPU-only
22
- use_mlock=False
23
- )
24
-
25
- def generate_response(prompt):
26
- load_model()
27
- full_prompt = f"### Instruction:\n{prompt}\n\n### Response:\n"
28
- output = llm(full_prompt, max_tokens=512, stop=["###"])
29
- return output["choices"][0]["text"]
30
-
31
- demo = gr.Interface(
32
- fn=generate_response,
33
- inputs=gr.Textbox(lines=5, label="Enter your instruction"),
34
- outputs=gr.Textbox(lines=10, label="Model Response"),
35
- title="Viber AI",
36
- description="Ask the model to generate or modify code, HTML, or general text via instructions."
37
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+
4
+ # Load model and tokenizer
5
+ model_name = "Salesforce/codet5-small"
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
8
+
9
+ # Code generation function
10
+ def generate_code(prompt, history):
11
+ history = history or []
12
+ input_text = "\n".join(history + [prompt])
13
+ inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
14
+ outputs = model.generate(inputs, max_length=512)
15
+ decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
16
+
17
+ # Extract HTML, CSS, JS
18
+ html = css = js = ""
19
+ if "<html>" in decoded:
20
+ html = "<html>" + decoded.split("<html>")[1].split("</html>")[0] + "</html>"
21
+ if "<style>" in decoded:
22
+ css = decoded.split("<style>")[1].split("</style>")[0]
23
+ if "<script>" in decoded:
24
+ js = decoded.split("<script>")[1].split("</script>")[0]
25
+
26
+ history += [prompt, decoded]
27
+ return html.strip(), css.strip(), js.strip(), history
28
+
29
+ # Function to clear chat
30
+ def clear_chat():
31
+ return "", "", "", []
32
+
33
+ # Build Gradio UI
34
+ with gr.Blocks() as demo:
35
+ gr.Markdown("# Code Generator: HTML, CSS, JS")
36
+ with gr.Row():
37
+ prompt = gr.Textbox(label="Prompt", placeholder="Describe what you want to build...")
38
+ generate_btn = gr.Button("Generate")
39
+ new_chat_btn = gr.Button("New Chat")
40
+
41
+ with gr.Row():
42
+ html_output = gr.Code(label="HTML")
43
+ css_output = gr.Code(label="CSS")
44
+ js_output = gr.Code(label="JavaScript")
45
+
46
+ chat_state = gr.State([])
47
+
48
+ generate_btn.click(fn=generate_code, inputs=[prompt, chat_state],
49
+ outputs=[html_output, css_output, js_output, chat_state])
50
+ new_chat_btn.click(fn=clear_chat,
51
+ outputs=[html_output, css_output, js_output, chat_state])
52
 
53
  demo.launch()