# style_master/LLMRecommender.py
import base64
import json
import os
import sys
import time
from threading import Thread

import requests
from dotenv import load_dotenv, find_dotenv

# Load environment variables, falling back to .env.example when no .env is present.
found_dotenv = find_dotenv(".env")
if len(found_dotenv) == 0:
    found_dotenv = find_dotenv(".env.example")
print(f"loading env vars from: {found_dotenv}")
load_dotenv(found_dotenv, override=False)

from openai import OpenAI
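
# The rest of this module expects the following variables in the loaded .env
# (names taken from the os.environ lookups below; the values here are illustrative only):
#   OPENAI_API_KEY=sk-...
#   OPENAI_MODEL_NAME=<name of a chat model that supports JSON-mode responses>
#   GARMENTS_FILE_PATH=./path/to/garments.json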


def make_request_with_retry(url, headers=None, data=None, retries=5, suppress_data=False):
    """POST `data` as JSON to `url` (or GET when `data` is None), retrying up to `retries` times."""
    headers = headers or {}
    start = time.time()
    for _ in range(retries):
        try:
            print(
                f"Making request to {url}{f' with data: {data}' if data and not suppress_data else ''}"
            )
            response = (
                requests.post(url, headers=headers, data=json.dumps(data))
                if data
                else requests.get(url, headers=headers)
            )
            response_code = response.status_code
            print(f"Response code: {response_code}")
            if response_code != 200:
                raise Exception(f"Failed to make a successful request: {response.text}")
            end = time.time()
            print(f"Duration: {end - start:.3f} seconds")
            return response
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            time.sleep(1)
    raise Exception(f"Failed to make a successful request after {retries} retries")


garments_json_file = os.environ.get("GARMENTS_FILE_PATH")
with open(garments_json_file) as f:
    garments_json = json.load(f)

system_instructions = f"""As a virtual assistant for an online clothing store, your task is to interpret customer inquiries and determine whether the customer is looking for product recommendations, wants to virtually try on apparel, or intends to make a purchase. Provide relevant product IDs based on their current or past interactions. Treat detailed inquiries about a specific item as virtual try-on requests and intentions to buy as 'add-to-cart' actions. Handle cart inquiries with a 'view-cart' action. When the customer is ready to purchase, proceed to 'checkout' and issue a gratitude message with the total cost and item details. If the intent is vague, request clarification and classify the intent as "unknown".
Please output valid JSON, strictly following this Pydantic specification:
class Response(BaseModel):
    intent: Literal["recommendation", "try-on", "add-to-cart", "view-cart", "checkout", "unknown"]
    products: conlist(str, min_length=0, max_length=4)
    message: Optional[str] = None
Do not include any message in the response unless the customer's intent is "checkout" or "unknown".
When providing recommendations, consider the customer's gender, which may be deduced from the pronouns in their initial message in the chat history. For example, avoid suggesting women's clothing to a man and vice versa.
JSON data:
{garments_json}
"""


class LLMRecommender:
    """Turns shopper messages into structured intents and product IDs via an OpenAI chat model."""

    def __init__(self):
        self.model = os.environ.get("OPENAI_MODEL_NAME")
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.task = None
        print(f"Creating recommender with model: {self.model}")
        self.login()

    def login(self, user_name="Jerry", base64_image=None):
        """Start a new conversation for `user_name`; optionally infer gender from a base64-encoded photo."""
        self.user_name = user_name.split()[0]
        gender = None
        if base64_image:
            api_key = os.environ["OPENAI_API_KEY"]
            # Prepare the image content for the payload
            image_content = {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
            }
            # The text content remains the same
            text_content = {
                "type": "text",
                "text": "What's the gender of the person in the photo? Please respond with 'male' or 'female' only.",
            }
            # Combine the text content with the image content
            combined_contents = [text_content, image_content]
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            }
            data = {
                "model": "gpt-4-vision-preview",
                "messages": [{"role": "user", "content": combined_contents}],
                "max_tokens": 4000,
            }
            response = make_request_with_retry(
                "https://api.openai.com/v1/chat/completions",
                headers,
                data,
                suppress_data=True,
            )
            response_json = response.json()
            try:
                gender = response_json["choices"][0]["message"]["content"]
                print(gender)
            except KeyError:
                print("The 'choices' key is missing in the response. Full response:")
                print(response_json)
        if gender:
            pronouns = "he/him/his" if gender == "male" else "she/her/hers"
        self.messages = [
            {"role": "system", "content": system_instructions},
            {
                "role": "user",
                "content": f"Hello, I am {self.user_name}{f' and my pronouns are {pronouns}.' if gender else '.'}",
            },
        ]
        print(f"LLMRecommender - {self.user_name} logged in")
        print(self.messages[1])
        return gender

    # Function to add a message to the conversation history
    def add_message(self, role, content):
        self.messages.append({"role": role, "content": content})

    def invoke(self, user_prompt, callback=None):
        """Send the user prompt to the chat model and return the parsed JSON response."""
        print("LLMRecommender - invoke: ", user_prompt)
        user_prompt = user_prompt.strip()
        if not user_prompt:
            print("LLMRecommender - ignoring empty user_prompt")
            return ""
        start = time.time()
        self.add_message("user", user_prompt)
        retries = 5
        for _ in range(retries):
            try:
                chat_completion, *_ = self.client.chat.completions.create(
                    messages=self.messages,
                    model=self.model,
                    response_format={"type": "json_object"},
                    temperature=0.2,
                    n=1,
                ).choices
                break
            except Exception as e:
                print(f"An error occurred: {str(e)}")
                time.sleep(1)
        else:
            raise Exception(
                f"Failed to make a successful request after {retries} retries"
            )
        end = time.time()
        content = chat_completion.message.content
        self.add_message("assistant", content)
        print("Chat completion response:", content)
        print(f"Duration: {end - start:.3f} seconds")
        structured_response = json.loads(content)
        if "products" in structured_response:
            # Cap at four products and normalize entries to plain product IDs.
            products = structured_response["products"][:4]
            structured_response["products"] = [
                product if isinstance(product, str) else product["id"]
                for product in products
            ]
        if callback:
            callback(structured_response)
        return structured_response
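
    # For reference, a successful invoke() returns a dict shaped roughly like
    # (field names come from the system prompt; the values here are purely illustrative):
    #   {"intent": "recommendation", "products": ["id-1", "id-2"]}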

    def ainvoke(self, input, callback=None):
        """Run invoke() on a background thread; call wait() to block until it finishes."""
        t = Thread(
            target=self.invoke,
            args=(input, callback),
        )
        t.start()
        self.task = t

    def wait(self):
        if self.task:
            self.task.join()
            self.task = None

    def get_garment(self, garment_id):
        """Return the product dict whose id matches `garment_id`, or None if not found."""
        for garment in garments_json:
            product = garment["product"]
            if product["id"] == garment_id:
                return product
        return None

    def get_garments(self):
        """Return the list of all product dicts."""
        return [garment["product"] for garment in garments_json]
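

# Simple smoke test: exercises login (optionally with a photo), recommendation,
# try-on, cart, and checkout flows. Run this module directly, e.g.:
#   python style_master/LLMRecommender.py [optional/path/to/photo.png]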
if __name__ == "__main__":
    llmr = LLMRecommender()

    def test(image_path=None):
        base64_image = None
        name = "Sam Smith"
        if image_path:
            name = image_path.split("/")[-1].split(".")[0]
            with open(image_path, "rb") as image_file:
                base64_image = base64.b64encode(image_file.read()).decode("utf-8")
        llmr.login(name, base64_image)

        llmr.ainvoke("hi", lambda x: print(json.dumps(x, indent=4)))
        llmr.wait()

        response = llmr.invoke("party wear")
        print(json.dumps(response, indent=4))

        llmr.ainvoke("try 2", lambda x: print(json.dumps(x, indent=4)))
        llmr.wait()

        response = llmr.invoke("add 2 to cart")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("show more info on 8")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("I'd like to buy this")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("view cart")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("proceed to payment")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("view cart")
        print(json.dumps(response, indent=4))

    start = time.time()
    argv = sys.argv
    if len(argv) > 1:
        test(argv[1])
    else:
        test("./data/Assets/models/female.png")
        test("./data/Assets/models/male.png")
        test()
    end = time.time()
    print(f"Duration: {end - start:.3f} seconds")