Update app.py

app.py CHANGED
```diff
@@ -18,55 +18,79 @@ from langchain_core.chat_history import BaseChatMessageHistory
 from langchain.memory import ConversationBufferMemory
 from langchain_core.runnables.history import RunnableWithMessageHistory
 
-@st.cache_resource
-def get_llm_chain():
-    return custom_chain_with_history(llm=CustomLLM(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_type='text-generation', api_token=API_TOKEN, stop=["\n<|","<|"]), memory=st.session_state.memory)
-
-if 'memory' not in st.session_state:
-    st.session_state['memory'] = ConversationBufferMemory(return_messages=True)
-    st.session_state.memory.chat_memory.add_ai_message("Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?")
-
-if 'chain' not in st.session_state:
-    # st.session_state['chain'] = custom_chain_with_history(llm=CustomLLM(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_type='text-generation', api_token=API_TOKEN, stop=["\n<|","<|"]), memory=st.session_state.memory)
-    st.session_state['chain'] = get_llm_chain()
-    # st.session_state['chain'] = custom_chain_with_history(llm=InferenceClient("https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", headers = {"Authorization": f"Bearer {API_TOKEN}"}, stream=True, max_new_tokens=512, temperature=0.01), memory=st.session_state.memory)
 st.title("LMD Chatbot Tiket Ebesha Management")
 st.subheader("Monthly Ticket Sample")
 
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = [{"role":"assistant", "content":"Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?"}]
-
-# React to user input
-if prompt := st.chat_input("Ask me anything.."):
-    # Display user message in chat message container
-    st.chat_message("User").markdown(prompt)
-    # Add user message to chat history
-    st.session_state.messages.append({"role": "User", "content": prompt})
-
-    # full_response = st.session_state.chain.invoke(prompt).split("\n<|")[0]
-    full_response = st.session_state.chain.invoke({"question":prompt, "memory":st.session_state.memory}).split("\n<|")[0]
-
-    with st.chat_message("assistant"):
-        st.markdown(full_response)
-
-    st.session_state
+uploaded_files = st.file_uploader("Choose CSV or XLSX files", accept_multiple_files=True, type=["csv", "xlsx"])
+df_temp = []
+for uploaded_file in uploaded_files:
+    if uploaded_file.name.split(".")[-1] != 'csv':
+        a = pd.read_excel(uploaded_file)
+        uploaded_file = uploaded_file.name.split(".")[0]+".csv"
+        a.to_csv(uploaded_file, encoding="utf8", header=True, index=False)
+
+    df_temp.append(pd.read_csv(uploaded_file))
```
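A note on the conversion loop above: each XLSX upload is written out as a CSV on disk and immediately read back. pandas can read Streamlit's in-memory `UploadedFile` objects directly, so a sketch like the following (same pandas/Streamlit stack as the diff; XLSX parsing assumes `openpyxl` is installed) skips the filesystem round-trip:

```python
import pandas as pd
import streamlit as st

uploaded_files = st.file_uploader(
    "Choose CSV or XLSX files", accept_multiple_files=True, type=["csv", "xlsx"]
)

# pandas accepts Streamlit's UploadedFile objects as file-like inputs,
# so XLSX uploads never need to be written out as CSV files first.
frames = []
for f in uploaded_files:
    if f.name.lower().endswith(".xlsx"):
        frames.append(pd.read_excel(f))  # XLSX path; requires openpyxl
    else:
        frames.append(pd.read_csv(f))
```

This also avoids leaving generated `.csv` files behind in the app's working directory.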
```diff
 
+if uploaded_file:
+
+    if "df" not in st.session_state:
+        st.session_state.df = pd.concat(df_temp)
```
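Two caveats in this hunk: `uploaded_file` is the loop variable from the conversion loop, so on a session with no uploads it is never bound (a `NameError`, unless it is defined earlier in app.py), and otherwise it reflects only the last file; the `"df" not in st.session_state` guard also means files uploaded later never refresh the DataFrame. A minimal alternative, reusing `frames` from the sketch above:

```python
# Guard on the list returned by st.file_uploader rather than on the
# loop variable, and rebuild the DataFrame on every rerun so newly
# added files are picked up.
if uploaded_files:
    st.session_state.df = pd.concat(frames, ignore_index=True)
```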
```diff
+
+    @st.cache_resource
+    def get_llm_chain():
+        return custom_chain_with_history(llm=CustomLLM(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_type='text-generation', api_token=API_TOKEN, stop=["\n<|","<|"]), memory=st.session_state.memory, dataframe=st.session_state.df)
+
+    if 'memory' not in st.session_state:
+        st.session_state['memory'] = ConversationBufferMemory(return_messages=True)
+        st.session_state.memory.chat_memory.add_ai_message("Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?")
+
+    if 'chain' not in st.session_state:
+        # st.session_state['chain'] = custom_chain_with_history(llm=CustomLLM(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_type='text-generation', api_token=API_TOKEN, stop=["\n<|","<|"]), memory=st.session_state.memory)
+        st.session_state['chain'] = get_llm_chain()
+        # st.session_state['chain'] = custom_chain_with_history(llm=InferenceClient("https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", headers = {"Authorization": f"Bearer {API_TOKEN}"}, stream=True, max_new_tokens=512, temperature=0.01), memory=st.session_state.memory)
+
```
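One thing to watch in this hunk: `@st.cache_resource` caches a single object per process, not per session, and `get_llm_chain()` closes over `st.session_state.memory` and `st.session_state.df`. If two visitors use the app concurrently, the second would get the chain built around the first visitor's memory and dataframe. A per-session sketch built from the same names the diff uses (`custom_chain_with_history`, `CustomLLM`, and `API_TOKEN` are the app's own helpers):

```python
# Build the chain once per browser session instead of once per process;
# session_state is already per-session, so no extra caching is needed.
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferMemory(return_messages=True)

if "chain" not in st.session_state:
    st.session_state.chain = custom_chain_with_history(
        llm=CustomLLM(
            repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
            model_type="text-generation",
            api_token=API_TOKEN,
            stop=["\n<|", "<|"],
        ),
        memory=st.session_state.memory,
        dataframe=st.session_state.df,
    )
```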
```diff
+    # Initialize chat history
+    if "messages" not in st.session_state:
+        st.session_state.messages = [{"role":"assistant", "content":"Hello there! I'm AI assistant of Lintas Media Danawa. How can I help you today?"}]
+
+    # Display chat messages from history on app rerun
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
```
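The replay loop passes each stored role straight to `st.chat_message`, but the input handler below stores user turns under the capitalized role `"User"`. Streamlit applies its preset user/assistant styling to the lowercase names (`"user"`, `"assistant"`, and their `"human"`/`"ai"` aliases), so `"User"` would not be rendered with the built-in user avatar on replay. A small sketch of the consistent form:

```python
# Store the lowercase role names Streamlit recognizes so that replayed
# messages pick up the preset user/assistant avatars.
st.session_state.messages.append({"role": "user", "content": prompt})

for message in st.session_state.messages:
    with st.chat_message(message["role"]):  # "user" or "assistant"
        st.markdown(message["content"])
```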
```diff
+    # React to user input
+    if prompt := st.chat_input("Ask me anything.."):
+        # Display user message in chat message container
+        st.chat_message("User").markdown(prompt)
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "User", "content": prompt})
+
+        # full_response = st.session_state.chain.invoke(prompt).split("\n<|")[0]
+        full_response = st.session_state.chain.invoke({"question":prompt, "memory":st.session_state.memory}).split("\n<|")[0]
+
+        with st.chat_message("assistant"):
+            st.markdown(full_response)
+
```
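`full_response` is trimmed only at `"\n<|"` even though the model is invoked with two stop strings. A sketch that keeps the post-processing aligned with the `stop=["\n<|","<|"]` list passed to `CustomLLM`:

```python
# Cut the reply at whichever configured stop marker appears first,
# mirroring the stop list given to the LLM.
raw = st.session_state.chain.invoke(
    {"question": prompt, "memory": st.session_state.memory}
)
full_response = raw
for marker in ("\n<|", "<|"):
    full_response = full_response.split(marker)[0]
```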
```diff
+        # Display assistant response in chat message container
+        # with st.chat_message("assistant"):
+        #     message_placeholder = st.empty()
+        #     full_response = ""
+        #     for chunk in st.session_state.chain.stream(prompt):
+        #         full_response += chunk + " "
+        #         message_placeholder.markdown(full_response + " ")
+        #         if full_response[-4:] == "\n<|":
+        #             break
+            # st.markdown(full_response)
```
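If this streaming path is ever revived, a placeholder loop along the following lines is one way to do it. It assumes, as the commented-out code does, that `chain.stream(prompt)` yields plain-text chunks:

```python
# Stream chunks into a single placeholder, stopping at the "\n<|"
# marker instead of checking only the last four characters.
with st.chat_message("assistant"):
    placeholder = st.empty()
    full_response = ""
    for chunk in st.session_state.chain.stream(prompt):
        full_response += chunk
        if "\n<|" in full_response:
            full_response = full_response.split("\n<|")[0]
            break
        placeholder.markdown(full_response + "▌")  # typing indicator
    placeholder.markdown(full_response)
```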
```diff
+        st.session_state.memory.save_context({"question":prompt}, {"output":full_response})
+        st.session_state.memory.chat_memory.messages = st.session_state.memory.chat_memory.messages[-15:]
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": full_response})
```
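The trim to the last 15 messages keeps the buffer bounded, but an odd cap can strand a turn at the head of the window without its counterpart, since the buffer alternates human and AI messages after the greeting. A sketch with an even, hypothetical cap that keeps question/answer pairs whole:

```python
# Keep whole exchanges: an even cap never splits a question from its
# answer when the window slides.
MAX_MESSAGES = 14  # hypothetical cap, i.e. seven full exchanges
msgs = st.session_state.memory.chat_memory.messages
st.session_state.memory.chat_memory.messages = msgs[-MAX_MESSAGES:]
```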