Add requirements

- .ipynb_checkpoints/app-checkpoint.py +80 -0
- .ipynb_checkpoints/requirements-checkpoint.txt +11 -0
- app.py +80 -0
- data/xtuner +1 -0
- requirements.txt +11 -0
    	
.ipynb_checkpoints/app-checkpoint.py    ADDED
(Jupyter checkpoint artifact; its 80 added lines are identical to app.py below.)
    	
.ipynb_checkpoints/requirements-checkpoint.txt    ADDED
(Jupyter checkpoint artifact; its 11 added lines are identical to requirements.txt below.)
    	
app.py    ADDED
@@ -0,0 +1,80 @@
+import streamlit as st
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.core.callbacks import CallbackManager
+from llama_index.llms.openai_like import OpenAILike
+
+st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
+st.title("llama_index_demo")
+
+# Create an instance of CallbackManager
+callback_manager = CallbackManager()
+
+api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
+model = "internlm2.5-latest"
+api_key = st.sidebar.text_input('API Key', value='', type='password')
+
+# api_base_url = "https://api.siliconflow.cn/v1"
+# model = "internlm/internlm2_5-7b-chat"
+# api_key = "<your API Key>"
+
+llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True, callback_manager=callback_manager)
+
+# Initialize the embedding model and build the query engine over ./data
+@st.cache_resource
+def init_models():
+    embed_model = HuggingFaceEmbedding(
+        model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
+    )
+    Settings.embed_model = embed_model
+
+    # Use the LLM configured above
+    Settings.llm = llm
+
+    documents = SimpleDirectoryReader("./data").load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine()
+
+    return query_engine
+
+# Initialize the models only once per session
+if 'query_engine' not in st.session_state:
+    st.session_state['query_engine'] = init_models()
+
+def greet2(question):
+    response = st.session_state['query_engine'].query(question)
+    return response
+
+# Store LLM generated responses
+if "messages" not in st.session_state.keys():
+    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]
+
+# Display chat messages
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.write(message["content"])
+
+def clear_chat_history():
+    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]
+
+st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
+
+# Function for generating a llama_index response
+def generate_llama_index_response(prompt_input):
+    return greet2(prompt_input)
+
+# User-provided prompt
+if prompt := st.chat_input():
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.write(prompt)
+
+# Generate a new response if the last message is not from the assistant
+if st.session_state.messages[-1]["role"] != "assistant":
+    with st.chat_message("assistant"):
+        with st.spinner("Thinking..."):
+            response = generate_llama_index_response(prompt)
+            placeholder = st.empty()
+            placeholder.markdown(response)
+    message = {"role": "assistant", "content": response}
+    st.session_state.messages.append(message)
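Note on the caching design: st.cache_resource already memoizes init_models() across reruns, so the 'query_engine' guard in session_state is redundant but harmless; it simply keeps a per-session handle to the cached engine.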
    	
data/xtuner    ADDED
@@ -0,0 +1 @@
+Subproject commit 4cade9f547cbdf1ff5bada1c8a7f4a15a01c49ef
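The data/xtuner entry pins the xtuner repository as a git submodule; it is the corpus that SimpleDirectoryReader("./data") indexes, so it must be checked out (for example with git submodule update --init) before the app can build an index. The sketch below is not part of the commit: it runs the same pipeline headlessly to sanity-check the corpus and API access. The PUYU_API_KEY environment variable and the sample question are illustrative assumptions.

import os
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike

# Same models as app.py, configured without the Streamlit UI.
Settings.embed_model = HuggingFaceEmbedding(
    model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
)
Settings.llm = OpenAILike(
    model="internlm2.5-latest",
    api_base="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    api_key=os.environ["PUYU_API_KEY"],  # assumed env var; app.py reads the key from the sidebar instead
    is_chat_model=True,
)

# Build the index over the submodule contents and run one query.
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
print(index.as_query_engine().query("What is xtuner?"))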
    	
requirements.txt    ADDED
@@ -0,0 +1,11 @@
+einops==0.7.0
+protobuf==5.26.1
+llama-index==0.11.20
+llama-index-llms-replicate==0.3.0
+llama-index-llms-openai-like==0.2.0
+llama-index-embeddings-huggingface==0.3.1
+llama-index-embeddings-instructor==0.2.1
+torch==2.5.0
+torchvision==0.20.0
+torchaudio==2.5.0
+openai
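A quick way to confirm the pinned stack resolved as expected is a small check script (hypothetical, not part of the commit; importlib.metadata ships with Python 3.8+):

from importlib.metadata import version

# Report installed versions; they should match the pins above.
for dist in ("llama-index", "llama-index-embeddings-huggingface", "torch", "openai"):
    print(dist, version(dist))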