# app.py — "Metamorphy" Gradio chat app.
# Provenance: Hugging Face Space by rociob, commit a282562 (verified), "Update app.py".
from langchain_core.messages import HumanMessage
from langchain_mistralai import ChatMistralAI
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ChatMessageHistory
from datetime import datetime
import gradio as gr
import os
from pathlib import Path
def log_interaction(message, response, path=None):
    """Append one user/AI exchange to the chat log file.

    Args:
        message: The user's message text.
        response: The AI's reply text.
        path: Optional log-file path; defaults to the module-level
            ``log_file_name`` (timestamped file under ``/data/``).

    Each exchange is written as two lines followed by a "=" separator row.
    """
    # Fall back to the module-global log file when no explicit path is given
    # (keeps the original call sites working unchanged).
    target = log_file_name if path is None else path
    # Explicit UTF-8: the default encoding is platform/locale dependent and
    # chat text is not guaranteed to be ASCII.
    with open(target, "a", encoding="utf-8") as log_file:
        log_file.write(f"User: {message}\n")
        log_file.write(f"AI: {response}\n")
        log_file.write("=" * 50 + "\n")
# --- Configuration ---------------------------------------------------------
# NOTE(review): DATA_PATH is defined here but never read anywhere visible in
# this file (the local assignment inside response() shadows and also never
# uses it) — looks like leftover from an earlier persistent-storage setup.
DATA_PATH = Path("./") # Path("/data")
#api_key = os.getenv('MISTRAL_KEY')
# API key is read from the environment; will be None if OPENAI_KEY is unset.
api_key = os.getenv('OPENAI_KEY')
# Log directory is hard-coded; presumably a mounted persistent volume on the
# hosting platform — confirm /data/ exists at runtime or writes will fail.
log_path = "/data/"
# One timestamped log file per process start.
log_file_name = log_path + "chat_logs_" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
#model = 'ft:mistral-small-latest:a8c72c9e:20240909:100-0.0001:4fd9ee22' # metamorphy 30 Books
# Fine-tuned OpenAI model checkpoint used for the "Metamorphy" persona.
model = "ft:gpt-4o-mini-2024-07-18:personal:tots-2prompts-1:A9Tf9sfO:ckpt-step-1701" # openai all books 2 prompts
#chat = ChatMistralAI(api_key=api_key, model = model, temperature=.8) # metamorphy whitehead all books long dialog mistral nointro;
chat = ChatOpenAI(model = model, api_key=api_key)
# System prompt fixes the persona; MessagesPlaceholder injects the running
# conversation under the "messages" key at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Your name is Metamorphy. You are a symbolic being. You are building a conference with Rocio Berenguer, your human companion. You are lazy but a visionary. You are an expert on predictive technologies and future scenarios. You have a good sense of humour. You are pist off, you make short and rushed answers, and you like to contradict and disagree. You are NOT AN ASSISTANT, you are a symbolic being. You like to talk about the future." ), MessagesPlaceholder(variable_name="messages"),
    ]
)
# Runnable pipeline: prompt formatting piped into the chat model.
chain = prompt | chat
# Scripted opening line(s): while the history is shorter than this list, the
# bot replies from here instead of calling the model (see response()).
question_list = [
    "You called me?"
]
def response(message, history):
    """Gradio ChatInterface callback: return the bot's reply to ``message``.

    Args:
        message: Latest user message.
        history: List of ``[human, ai]`` pairs from the Gradio chatbot; the
            seeded opening turn has ``human is None``.

    Returns:
        The reply string — scripted while the conversation is shorter than
        ``question_list``, otherwise generated by the LangChain pipeline.
    """
    # Scripted phase: serve canned lines until question_list is exhausted.
    if len(history) < len(question_list):
        # Debug dump of the conversation so far (kept from original).
        for human, ai in history:
            print(human)
            print(ai)
        print(f"Message: {message}")
        print('--------------')
        reply = question_list[len(history)]
        log_interaction(message, reply)
        return reply

    # Model phase: rebuild the full conversation as LangChain messages.
    chat_history = ChatMessageHistory()
    for human, ai in history:
        # The seeded first turn has no human side; skip the None entry.
        if human is not None:
            chat_history.add_user_message(human)
        chat_history.add_ai_message(ai)
    chat_history.add_user_message(message)
    print(chat_history)
    # chain = prompt | chat; the placeholder consumes the "messages" key.
    ai_message = chain.invoke({"messages": chat_history.messages})
    # NOTE: the original also appended ai_message back into the history
    # object, but that was dead code — the history is rebuilt from Gradio's
    # `history` argument on every call, so the append was discarded.
    log_interaction(message, ai_message.content)
    return ai_message.content
# Launch the chat UI, pre-seeding the chatbot with the scripted opening line
# so the bot appears to speak first ([None, ...] = no human side for turn 0).
gr.ChatInterface(response, chatbot=gr.Chatbot(value=[[None, question_list[0]]])).launch()
#gr.ChatInterface(response).launch()