prakhardoneria committed (verified)
Commit 34e03e1 · 1 Parent(s): f7ce9eb

Update app.py
Files changed (1): app.py (+50 -49)
app.py CHANGED
@@ -1,56 +1,57 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM  # falcon-rw-1b is a decoder-only (causal) model
 import json

-# Load MagicoderS-CL-7B model
-MODEL_NAME = "ise-uiuc/Magicoder-S-CL-7B"
+MODEL_NAME = "tiiuae/falcon-rw-1b"  # lightweight and fast
+
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
-
-# System prompt
-SYSTEM_INSTRUCTION = (
-    "You are a helpful assistant that writes UI code. "
-    "Respond only in JSON format with the following keys: "
-    "\"filename\", \"html\", \"css\", and \"js\". "
-    "Do not include explanations. Just return a JSON object."
-)
-
-def generate_code(user_prompt):
-    prompt = f"{SYSTEM_INSTRUCTION}\nPrompt: {user_prompt}"
-
-    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)
-    outputs = model.generate(
-        **inputs,
-        max_new_tokens=512,
-        temperature=0.7,
-        top_p=0.9,
-        do_sample=True
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+
+def generate_code(prompt, history):
+    history = history or []
+    system_instruction = (
+        "You are a coding assistant. "
+        "Respond only in valid JSON format like this:\n"
+        "{\n"
+        " \"filename\": \"index.html\",\n"
+        " \"html\": \"...HTML code...\",\n"
+        " \"css\": \"...CSS code...\",\n"
+        " \"js\": \"...JavaScript code...\"\n"
+        "}\n"
     )
-    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+    full_prompt = system_instruction + "\nPrompt: " + prompt
+    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512)
+    outputs = model.generate(**inputs, max_new_tokens=512)
+    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)

-    # Try to parse JSON
     try:
-        result = json.loads(decoded)
-    except json.JSONDecodeError:
-        # fallback if output is not valid JSON
-        result = {
-            "filename": "index.html",
-            "html": decoded,
-            "css": "",
-            "js": ""
-        }
-
-    return result
-
-# Gradio Interface
-iface = gr.Interface(
-    fn=generate_code,
-    inputs=gr.Textbox(lines=2, placeholder="Describe the UI you want..."),
-    outputs="json",
-    title="UI Code Generator",
-    description="Enter a prompt to generate HTML, CSS, and JS code in structured JSON format."
-)
-
-if __name__ == "__main__":
-    iface.launch()
+        parsed = json.loads(decoded)
+        html = parsed.get("html", "")
+        css = parsed.get("css", "")
+        js = parsed.get("js", "")
+    except Exception:
+        html, css, js = "", "", ""
+
+    history.append((prompt, decoded))
+    return html, css, js, history
+
+def clear_history():
+    return "", "", "", []
+
+with gr.Blocks() as demo:
+    chat_history = gr.State([])
+
+    with gr.Row():
+        inp = gr.Textbox(label="Prompt", lines=2)
+        send_btn = gr.Button("Generate")
+        clear_btn = gr.Button("New Chat")
+
+    with gr.Row():
+        html_out = gr.Code(label="HTML", language="html")
+        css_out = gr.Code(label="CSS", language="css")
+        js_out = gr.Code(label="JavaScript", language="javascript")
+
+    send_btn.click(generate_code, [inp, chat_history], [html_out, css_out, js_out, chat_history])
+    clear_btn.click(clear_history, outputs=[html_out, css_out, js_out, chat_history])  # four outputs to match clear_history's four return values
+
+demo.launch()
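
One caveat the commit leaves open: because the decoder-only model echoes the prompt ahead of its continuation, decoded almost always begins with the instruction text, so json.loads(decoded) falls into the except branch and all three code panes come back empty. Below is a minimal sketch of a more forgiving parse step; extract_json_block is a hypothetical helper, not part of this repository, and it assumes the prompt echo plus a single top-level JSON object in the reply.

# Hypothetical helper (not in this commit): strip the echoed prompt and parse the
# first balanced {...} block from the generated text.
import json

def extract_json_block(generated: str, prompt: str) -> dict:
    text = generated[len(prompt):] if generated.startswith(prompt) else generated
    start = text.find("{")
    if start != -1:
        depth = 0
        for i, ch in enumerate(text[start:], start):
            if ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
                if depth == 0:
                    try:
                        return json.loads(text[start:i + 1])
                    except json.JSONDecodeError:
                        break  # malformed block; fall back below
    # Same empty shape the app already falls back to.
    return {"filename": "index.html", "html": "", "css": "", "js": ""}

Inside generate_code, parsed = extract_json_block(decoded, full_prompt) could then stand in for the bare json.loads(decoded) call.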