Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,22 +1,15 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
import subprocess
|
| 3 |
import streamlit as st
|
| 4 |
-
from pylint import lint
|
| 5 |
-
from io import StringIO
|
| 6 |
-
import streamlit as st
|
| 7 |
-
import os
|
| 8 |
-
import subprocess
|
| 9 |
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
|
| 10 |
import black
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
try:
|
| 18 |
-
model = AutoModelForCausalLM.from_pretrained(model_name)
|
| 19 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 20 |
|
| 21 |
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
|
| 22 |
PROJECT_ROOT = "projects"
|
|
@@ -46,10 +39,8 @@ class AIAgent:
|
|
| 46 |
def create_agent_prompt(self):
|
| 47 |
skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
|
| 48 |
agent_prompt = f"""
|
| 49 |
-
|
| 50 |
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
|
| 51 |
{skills_str}
|
| 52 |
-
|
| 53 |
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
|
| 54 |
"""
|
| 55 |
return agent_prompt
|
|
@@ -112,7 +103,7 @@ def chat_interface_with_agent(input_text, agent_name):
|
|
| 112 |
|
| 113 |
# Combine the agent prompt with user input
|
| 114 |
combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
|
| 115 |
-
|
| 116 |
# Truncate input text to avoid exceeding the model's maximum length
|
| 117 |
max_input_length = 900
|
| 118 |
input_ids = tokenizer.encode(combined_input, return_tensors="pt")
|
|
@@ -167,6 +158,22 @@ def terminal_interface(command, project_name=None):
|
|
| 167 |
st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
|
| 168 |
return result.stderr
|
| 169 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
def summarize_text(text):
|
| 171 |
summarizer = pipeline("summarization")
|
| 172 |
summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
|
|
@@ -179,34 +186,10 @@ def sentiment_analysis(text):
|
|
| 179 |
st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
|
| 180 |
return sentiment[0]
|
| 181 |
|
| 182 |
-
# ... [rest of the translate_code function, but remove the OpenAI API call and replace it with your own logic] ...
|
| 183 |
-
|
| 184 |
-
def generate_code(code_idea):
|
| 185 |
-
# Replace this with a call to a Hugging Face model or your own logic
|
| 186 |
-
# For example, using a text-generation pipeline:
|
| 187 |
-
generator = pipeline('text-generation', model='gpt4o')
|
| 188 |
-
generated_code = generator(code_idea, max_length=10000, num_return_sequences=1)[0]['generated_text']
|
| 189 |
-
messages=[
|
| 190 |
-
{"role": "system", "content": "You are an expert software developer."},
|
| 191 |
-
{"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
|
| 192 |
-
]
|
| 193 |
-
st.session_state.current_state['toolbox']['generated_code'] = generated_code
|
| 194 |
-
|
| 195 |
-
return generated_code
|
| 196 |
-
|
| 197 |
def translate_code(code, input_language, output_language):
|
| 198 |
# Define a dictionary to map programming languages to their corresponding file extensions
|
| 199 |
language_extensions = {
|
| 200 |
-
|
| 201 |
-
"JavaScript": "js",
|
| 202 |
-
"Java": "java",
|
| 203 |
-
"C++": "cpp",
|
| 204 |
-
"C#": "cs",
|
| 205 |
-
"Ruby": "rb",
|
| 206 |
-
"Go": "go",
|
| 207 |
-
"PHP": "php",
|
| 208 |
-
"Swift": "swift",
|
| 209 |
-
"TypeScript": "ts",
|
| 210 |
}
|
| 211 |
|
| 212 |
# Add code to handle edge cases such as invalid input and unsupported programming languages
|
|
@@ -329,11 +312,11 @@ elif app_mode == "Tool Box":
|
|
| 329 |
# Text Translation Tool (Code Translation)
|
| 330 |
st.subheader("Translate Code")
|
| 331 |
code_to_translate = st.text_area("Enter code to translate:")
|
| 332 |
-
|
| 333 |
-
|
| 334 |
if st.button("Translate Code"):
|
| 335 |
-
translated_code = translate_code(code_to_translate,
|
| 336 |
-
st.code(translated_code, language=
|
| 337 |
|
| 338 |
# Code Generation
|
| 339 |
st.subheader("Code Generation")
|
|
@@ -406,6 +389,28 @@ elif app_mode == "Workspace Chat App":
|
|
| 406 |
st.subheader("Workspace Projects")
|
| 407 |
for project, details in st.session_state.workspace_projects.items():
|
| 408 |
st.write(f"Project: {project}")
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
+
import sys
|
| 3 |
import subprocess
|
| 4 |
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
|
| 6 |
import black
|
| 7 |
+
from pylint import lint
|
| 8 |
+
from io import StringIO
|
| 9 |
+
import openai

# Read the OpenAI API key from the environment instead of hard-coding it in
# source control; an unset variable yields "" and API calls will fail loudly.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
|
| 15 |
PROJECT_ROOT = "projects"
|
|
|
|
| 39 |
def create_agent_prompt(self):
    """Build this agent's self-introduction prompt from its name and skill list."""
    bullet_list = "\n".join(f"* {skill}" for skill in self.skills)
    return f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{bullet_list}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""
|
|
|
|
| 103 |
|
| 104 |
# Combine the agent prompt with user input
|
| 105 |
combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
|
| 106 |
+
|
| 107 |
# Truncate input text to avoid exceeding the model's maximum length
|
| 108 |
max_input_length = 900
|
| 109 |
input_ids = tokenizer.encode(combined_input, return_tensors="pt")
|
|
|
|
| 158 |
st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
|
| 159 |
return result.stderr
|
| 160 |
|
| 161 |
+
def code_editor_interface(code):
    """Format a Python snippet with black and lint it with pylint.

    Args:
        code: Python source text to format and lint.

    Returns:
        (formatted_code, lint_message): the black-formatted source (or the
        original text when black makes no change or cannot parse it) and
        pylint's combined stdout/stderr report.

    Side effects:
        Stores both results in st.session_state.current_state['toolbox'].
    """
    import tempfile  # local import: only this tool needs it

    try:
        formatted_code = black.format_str(code, mode=black.FileMode())
    except black.NothingChanged:
        formatted_code = code
    except black.InvalidInput:
        # Unparseable input: fall back to the raw text instead of crashing the UI.
        formatted_code = code

    # pylint's py_run takes a command line (a file path plus options), not raw
    # source text, so write the snippet to a temporary file and lint that.
    # delete=False so the file can be reopened by pylint on all platforms.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(code)
        tmp_path = tmp.name
    try:
        # return_std=True already captures pylint's output; no need to patch
        # sys.stdout/sys.stderr (the old redirection also leaked on errors).
        pylint_stdout, pylint_stderr = lint.py_run(tmp_path, return_std=True)
        lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
    finally:
        os.unlink(tmp_path)

    st.session_state.current_state['toolbox']['formatted_code'] = formatted_code
    st.session_state.current_state['toolbox']['lint_message'] = lint_message
    return formatted_code, lint_message
|
| 176 |
+
|
| 177 |
def summarize_text(text):
|
| 178 |
summarizer = pipeline("summarization")
|
| 179 |
summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
|
|
|
|
| 186 |
st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
|
| 187 |
return sentiment[0]
|
| 188 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 189 |
def translate_code(code, input_language, output_language):
|
| 190 |
# Define a dictionary to map programming languages to their corresponding file extensions
|
| 191 |
language_extensions = {
|
| 192 |
+
# (remaining language-to-extension entries omitted from this diff excerpt)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 193 |
}
|
| 194 |
|
| 195 |
# Add code to handle edge cases such as invalid input and unsupported programming languages
|
|
|
|
| 312 |
# Text Translation Tool (Code Translation)
|
| 313 |
st.subheader("Translate Code")
|
| 314 |
code_to_translate = st.text_area("Enter code to translate:")
|
| 315 |
+
source_language = st.text_input("Enter source language (e.g. 'Python'):")
|
| 316 |
+
target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
|
| 317 |
if st.button("Translate Code"):
|
| 318 |
+
translated_code = translate_code(code_to_translate, source_language, target_language)
|
| 319 |
+
st.code(translated_code, language=target_language.lower())
|
| 320 |
|
| 321 |
# Code Generation
|
| 322 |
st.subheader("Code Generation")
|
|
|
|
| 389 |
st.subheader("Workspace Projects")
|
| 390 |
for project, details in st.session_state.workspace_projects.items():
|
| 391 |
st.write(f"Project: {project}")
|
| 392 |
+
for file in details['files']:
|
| 393 |
+
st.write(f" - {file}")
|
| 394 |
+
|
| 395 |
+
# Chat with AI Agents
|
| 396 |
+
st.subheader("Chat with AI Agents")
|
| 397 |
+
selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
|
| 398 |
+
agent_chat_input = st.text_area("Enter your message for the agent:")
|
| 399 |
+
if st.button("Send to Agent"):
|
| 400 |
+
agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
|
| 401 |
+
st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
|
| 402 |
+
st.write(f"{selected_agent}: {agent_chat_response}")
|
| 403 |
+
|
| 404 |
+
# Automate Build Process
|
| 405 |
+
st.subheader("Automate Build Process")
|
| 406 |
+
if st.button("Automate"):
|
| 407 |
+
agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
|
| 408 |
+
summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
|
| 409 |
+
st.write("Autonomous Build Summary:")
|
| 410 |
+
st.write(summary)
|
| 411 |
+
st.write("Next Step:")
|
| 412 |
+
st.write(next_step)
|
| 413 |
+
|
| 414 |
+
# Display current state for debugging
|
| 415 |
+
st.sidebar.subheader("Current State")
|
| 416 |
+
st.sidebar.json(st.session_state.current_state)
|