Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,49 +2,73 @@ import os
|
|
2 |
import streamlit as st
|
3 |
from git import Repo
|
4 |
import shutil
|
5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
from llama_index.llms.llama_cpp import LlamaCPP
|
7 |
-
from llama_index.embeddings import HuggingFaceEmbedding
|
8 |
-
|
9 |
-
|
10 |
-
st.
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
if
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import shutil

import streamlit as st
from git import Repo

from llama_index.core import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
# SentenceSplitter is not exported from the llama_index.core top level in the
# 0.10+ package layout; importing it from llama_index.core raises ImportError.
# It lives in the node_parser subpackage.
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.llama_cpp import LlamaCPP
# Streamlit page chrome. set_page_config must be the first Streamlit call
# executed in the script run.
st.set_page_config(page_title="GitHub Repo Explainer", layout="wide")
# NOTE(review): the leading characters in several UI strings ("π", "β", ...)
# look like mojibake'd emoji from an encoding round-trip — confirm against the
# original source before changing them; they are user-visible runtime text.
st.title("π GitHub Repository Explainer (100% Free)")

# URL of the GitHub repository the user wants cloned and summarized.
github_url = st.text_input("GitHub URL", placeholder="https://github.com/user/repo")
# Main action: clone the repo, build a vector index over its files with a
# local GGUF LLM + HuggingFace embeddings, then query it for a summary.
if st.button("Load and Analyze"):
    if github_url:
        try:
            # Clean previous repo if it exists. ignore_errors matches the
            # Reset handler below and avoids a crash on read-only .git
            # object files (notably on Windows).
            if os.path.exists("repo"):
                shutil.rmtree("repo", ignore_errors=True)

            with st.spinner("π₯ Cloning GitHub repository..."):
                Repo.clone_from(github_url, "repo")
                st.success("β Repo cloned successfully.")

            with st.spinner("π§ Loading LLM and embeddings..."):
                # Local GGUF model; the path is relative to the working
                # directory, so the file must be shipped alongside the app.
                llm = LlamaCPP(
                    model_path="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
                    temperature=0.7,
                    max_new_tokens=512,
                    context_window=2048,
                    generate_kwargs={"top_p": 0.95, "top_k": 50},
                    model_kwargs={"n_gpu_layers": 20, "n_batch": 512},
                    verbose=True,
                )

                embed_model = HuggingFaceEmbedding(
                    model_name="sentence-transformers/all-MiniLM-L6-v2"
                )

                # NOTE(review): ServiceContext is deprecated in llama-index
                # 0.10 and removed in 0.11+ (replaced by Settings) — confirm
                # the pinned llama-index version still provides it.
                service_context = ServiceContext.from_defaults(
                    llm=llm,
                    embed_model=embed_model,
                    node_parser=SentenceSplitter(chunk_size=512, chunk_overlap=50),
                )

            with st.spinner("π Reading and parsing files..."):
                # recursive=True: without it only the repo's top-level files
                # are read, missing nearly all of the code base. The reader's
                # default exclude_hidden=True keeps .git/ out of the corpus.
                docs = SimpleDirectoryReader("repo", recursive=True).load_data()
                st.write(f"π {len(docs)} documents loaded.")

            with st.spinner("π Building index..."):
                index = VectorStoreIndex.from_documents(docs, service_context=service_context)
                query_engine = index.as_query_engine()

            with st.spinner("π§ Querying the model..."):
                query = "Explain the purpose, structure, and setup steps of this GitHub repository."
                response = query_engine.query(query)

            st.subheader("π§Ύ Repository Summary")
            st.write(str(response))

        except Exception as e:
            # Broad catch is deliberate at this UI boundary: surface any
            # failure (clone, model load, indexing) in the page instead of
            # a raw stack trace.
            st.error(f"β Something went wrong:\n\n{e}")
    else:
        st.warning("β οΈ Please enter a GitHub repo URL.")
# "Reset" removes the cloned checkout and restarts the script from the top.
if st.button("Reset"):
    shutil.rmtree("repo", ignore_errors=True)
    # st.experimental_rerun() is deprecated and removed in newer Streamlit
    # releases in favor of st.rerun(); prefer the new API when present and
    # fall back to the old name on older versions.
    (getattr(st, "rerun", None) or st.experimental_rerun)()