Update app.py
app.py
CHANGED
@@ -1,73 +1,48 @@
 import os
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 os.environ["NO_CUDA_EXT"] = "1"


 from reader_llm import get_reader_llm
 from retrieval import get_retriever
 from answer_rag import answer_with_rag2
 import streamlit as st

 # Page setup
 st.set_page_config(page_title="RAG", layout="wide")
 st.title("Туристический путеводитель")
 st.header("Города: Ярославль, Екатеринбург, Нижний Новгород, Владимир")

 @st.cache_resource
 def load_models():
     READER_LLM = get_reader_llm(name="Vikhrmodels/Vikhr-Llama-3.2-1B-Instruct")
-    #
+    # lightweight model for the app hosted on Hugging Face
     embedding_model, KNOWLEDGE_VECTOR_DATABASE = get_retriever()
     return READER_LLM, embedding_model, KNOWLEDGE_VECTOR_DATABASE

 READER_LLM, _, KNOWLEDGE_VECTOR_DATABASE = load_models()

 if "messages" not in st.session_state:
     st.session_state.messages = []

 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])

 if prompt := st.chat_input("Задайте Ваш вопрос"):
     st.session_state.messages.append({"role": "user", "content": prompt})

     with st.chat_message("user"):
         st.markdown(prompt)

     with st.chat_message("assistant"):
         with st.spinner("Ищу информацию..."):
             answer, sources = answer_with_rag2(
                 question=prompt,
                 llm=READER_LLM,
                 knowledge_index=KNOWLEDGE_VECTOR_DATABASE
             )
             st.markdown(answer)


-            # st.markdown("**Источники информации:**")
-            # for i, doc in enumerate(sources):
-            #     with st.expander(f"Источник {i+1}"):
-            #         st.write(doc.page_content)
-            #         if hasattr(doc, 'metadata'):
-            #             if "latitude" in doc.metadata and "longitude" in doc.metadata:
-            #                 st.write(f"📍 Координаты: {doc.metadata['latitude']}, {doc.metadata['longitude']}")
-            #             if "image" in doc.metadata and doc.metadata["image"]:
-            #                 try:
-            #                     if isinstance(doc.metadata["image"], str):
-            #                         if doc.metadata["image"].startswith('/9j/'):
-            #                             import base64
-            #                             from io import BytesIO
-            #                             from PIL import Image
-
-            #                             img_bytes = base64.b64decode(doc.metadata["image"])
-            #                             img = Image.open(BytesIO(img_bytes))
-            #                             st.image(img, caption=f"Изображение {i+1}")
-            #                         else:
-            #                             st.image(doc.metadata["image"], caption=f"Изображение {i+1}")
-            #                     elif isinstance(doc.metadata["image"], bytes):
-            #                         st.image(doc.metadata["image"], caption=f"Изображение {i+1}")
-            #                 except Exception as e:
-            #                     st.error(f"Ошибка загрузки изображения: {str(e)}")
-
     st.session_state.messages.append({"role": "assistant", "content": answer})
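For context on what this file expects from the rest of the Space: app.py only calls three helpers from reader_llm.py, retrieval.py and answer_rag.py, and assumes get_retriever() returns an (embedding_model, vector_index) pair while answer_with_rag2() returns an (answer, sources) pair. Below is a minimal sketch of what those helpers could look like, assuming a transformers text-generation pipeline and a LangChain FAISS index; the embedding model name and index path are illustrative placeholders, not the Space's actual code.

# Hypothetical sketch of the three helper modules app.py imports
# (reader_llm.py, retrieval.py, answer_rag.py). The real implementations
# in this Space may differ; model names and paths below are assumptions.
from transformers import pipeline
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings


def get_reader_llm(name: str):
    # Assumed: the reader LLM is a plain transformers text-generation pipeline.
    return pipeline("text-generation", model=name)


def get_retriever(index_path: str = "faiss_index"):
    # Assumed: an embedding model plus a prebuilt FAISS index of the city guides.
    embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-small")
    index = FAISS.load_local(index_path, embeddings, allow_dangerous_deserialization=True)
    return embeddings, index


def answer_with_rag2(question: str, llm, knowledge_index, num_docs: int = 5):
    # Retrieve the most similar chunks and stuff them into a simple prompt.
    docs = knowledge_index.similarity_search(question, k=num_docs)
    context = "\n\n".join(doc.page_content for doc in docs)
    prompt = f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"
    answer = llm(prompt, max_new_tokens=300, return_full_text=False)[0]["generated_text"]
    return answer, docs

With interfaces like these the app runs locally via `streamlit run app.py`; the @st.cache_resource decorator in app.py ensures the model and index are loaded once per process rather than on every Streamlit rerun.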