File size: 9,432 Bytes
1761643
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
import base64
import json
import os
import sys
import time
from threading import Thread

from dotenv import load_dotenv, find_dotenv
import requests

# Locate a .env file, falling back to the checked-in .env.example, and load
# it WITHOUT overriding variables already set in the environment.
found_dotenv = find_dotenv(".env")

if not found_dotenv:  # find_dotenv returns "" when nothing is found
    found_dotenv = find_dotenv(".env.example")
print(f"loading env vars from: {found_dotenv}")
load_dotenv(found_dotenv, override=False)

from openai import OpenAI


def make_request_with_retry(url, headers=None, data=None, retries=5, suppress_data=False):
    """POST (when ``data`` is given) or GET ``url``, retrying on any failure.

    Args:
        url: Target URL.
        headers: Optional dict of HTTP headers (defaults to none).
        data: JSON-serializable payload; its presence switches the call to POST.
        retries: Maximum number of attempts before giving up.
        suppress_data: When True, the payload is not echoed in the log line
            (used to keep large base64 images out of the console).

    Returns:
        The successful ``requests.Response`` (HTTP 200).

    Raises:
        Exception: If no attempt returns HTTP 200 within ``retries`` tries.
    """
    # Avoid the mutable-default-argument pitfall: a shared dict default could
    # be mutated by one caller and leak into every later call.
    if headers is None:
        headers = {}

    start = time.time()

    for _ in range(retries):
        try:
            print(
                f"Making request to {url}{f' with data: {data}' if data and not suppress_data else ''}"
            )

            # NOTE(review): a falsy payload (e.g. {}) falls through to GET —
            # behavior preserved from the original.
            response = (
                requests.post(url, headers=headers, data=json.dumps(data))
                if data
                else requests.get(url, headers=headers)
            )

            response_code = response.status_code
            print(f"Response code: {response_code}")

            if response_code != 200:
                raise Exception(f"Failed to make a successful request: {response.text}")

            end = time.time()
            print(f"Duration: {end - start:.3f} seconds")
            return response
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            time.sleep(1)  # brief back-off before the next attempt

    raise Exception(f"Failed to make a successful request after {retries} retries")


# Load the product catalog the prompt is built from; the path comes from the
# environment (see .env / .env.example).
garments_json_file = os.environ.get("GARMENTS_FILE_PATH")
# Use a context manager so the file handle is closed promptly (the original
# json.load(open(...)) leaked the handle until GC).
with open(garments_json_file) as _garments_fp:
    garments_json = json.load(_garments_fp)

# System prompt: defines the assistant's role, the strict JSON output schema,
# and embeds the full product catalog for grounding recommendations.
system_instructions = f"""As a virtual assistant for an online clothing store, your tasks involve interpreting customer inquiries to identify whether they are looking for product recommendations, wish to virtually try on apparel, or intend to purchase. Provide relevant product IDs based on their current or past interactions. Consider detailed inquiries as virtual try-on requests and intentions to buy as 'add-to-cart' actions. Handle cart inquiries with a 'view-cart' action. For purchase readiness, proceed to 'checkout', issuing a gratitude message with the total cost and item details. If intentions are vague, request clarification and classify as "unknown".

Please output valid JSON, strictly following this Pydantic specification:

class Response(BaseModel):
    intent: Literal["recommendation", "try-on", "add-to-cart", "view-cart", "checkout", "unknown"]
    products: conlist(min_length=0, max_length=4)
    message: Optional[str] = None

Do not include any message in the response unless the customer's intent is "checkout" or "unknown".

When providing recommendations, please consider the customer's gender, which may be deduced from the pronouns in their initial message in the chat history. For example, avoid suggesting women's clothing to a man and vice versa.

JSON data:
{garments_json}
"""


class LLMRecommender:
    """Conversational product recommender backed by an OpenAI chat model.

    Keeps a per-session message history and returns structured JSON
    responses (intent, product ids, optional message) for the store UI.
    """

    def __init__(self):
        self.model = os.environ.get("OPENAI_MODEL_NAME")
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        # Track the most recent background task so wait() is always safe to
        # call, even before the first ainvoke() (original raised
        # AttributeError in that case).
        self.task = None
        print(f"Creating recommender with model: {self.model}")
        self.login()

    def login(self, user_name="Jerry", base64_image=None):
        """Start a fresh conversation for ``user_name``.

        If ``base64_image`` is provided, the vision model is asked to guess
        the person's gender so pronouns can be included in the greeting
        (the system prompt uses them to tailor recommendations).

        Returns:
            The detected gender string (lower-cased), or None when no image
            was supplied or detection failed.
        """
        self.user_name = user_name.split()[0]  # keep first name only
        gender = None
        if base64_image:
            api_key = os.environ["OPENAI_API_KEY"]

            # Prepare the image content for payload
            image_content = {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
            }

            # The text content remains the same
            text_content = {
                "type": "text",
                "text": "What's the gender of the person in the photo? Please respond with 'male' or 'female' only.",
            }

            # Combine the text content with the image contents
            combined_contents = [text_content] + [image_content]

            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            }

            data = {
                "model": "gpt-4-vision-preview",
                "messages": [{"role": "user", "content": combined_contents}],
                "max_tokens": 4000,
            }
            # suppress_data=True keeps the (large) base64 image out of the logs.
            response = make_request_with_retry(
                "https://api.openai.com/v1/chat/completions",
                headers,
                data,
                suppress_data=True,
            )
            response_json = response.json()
            try:
                # Normalize so replies like "Male" still match the
                # comparison below.
                gender = response_json["choices"][0]["message"]["content"].strip().lower()
                print(gender)
            except KeyError:
                print("The 'choices' key is missing in the response. Full response:")
                print(response_json)

        if gender:
            pronouns = "he/him/his" if gender == "male" else "she/her/hers"
        self.messages = [
            {"role": "system", "content": system_instructions},
            {
                "role": "user",
                "content": f"Hello, I am {self.user_name}{f' and my pronouns are {pronouns}.' if gender else '.'}",
            },
        ]
        print(f"LLMRecommender - {self.user_name} logged in")
        print(self.messages[1])
        return gender

    # Function to add a message to the conversation history
    def add_message(self, role, content):
        self.messages.append({"role": role, "content": content})

    def invoke(self, user_prompt, callback=None):
        """Send ``user_prompt`` to the model and return the parsed JSON reply.

        Retries transient API failures, records both sides of the exchange
        in the history, caps the product list at 4 entries, and normalizes
        products to bare id strings. ``callback``, if given, receives the
        structured response before it is returned. Empty prompts are
        ignored and return "".
        """
        print("LLMRecommender - invoke: ", user_prompt)
        user_prompt = user_prompt.strip()
        if not user_prompt:
            print("LLMRecommender - ignoring empty user_prompt")
            return ""

        start = time.time()

        self.add_message("user", user_prompt)
        retries = 5
        for _ in range(retries):
            try:
                chat_completion, *_ = self.client.chat.completions.create(
                    messages=self.messages,
                    model=self.model,
                    response_format={"type": "json_object"},
                    temperature=0.2,  # low temperature for stable, parseable JSON
                    n=1,
                ).choices
                break
            except Exception as e:
                print(f"An error occurred: {str(e)}")
                time.sleep(1)
        else:
            # for/else: reached only when every retry failed
            raise Exception(
                f"Failed to make a successful request after {retries} retries"
            )

        end = time.time()

        content = chat_completion.message.content
        self.add_message("assistant", content)
        print("Chat completion response:", content)
        print(f"Duration: {end - start:.3f} seconds")

        structured_response = json.loads(content)
        if "products" in structured_response:
            # Cap at 4 products and reduce dict entries to their id strings —
            # the model sometimes returns full product objects. (The original
            # duplicated this truncation in a redundant nested block.)
            structured_response["products"] = [
                product if isinstance(product, str) else product["id"]
                for product in structured_response["products"][:4]
            ]

        if callback:
            callback(structured_response)

        return structured_response

    def ainvoke(self, input, callback=None):
        """Run invoke() on a background thread; join it with wait()."""
        t = Thread(
            target=self.invoke,
            args=(input, callback),
        )
        t.start()
        self.task = t

    def wait(self):
        """Block until the last ainvoke() finishes; no-op when none pending."""
        # getattr guard keeps this safe even on partially-initialized
        # instances.
        if getattr(self, "task", None):
            self.task.join()
            self.task = None

    def get_garment(self, garment_id):
        """Return the product dict whose id is ``garment_id``, else None."""
        for garment in garments_json:
            product = garment["product"]
            if product["id"] == garment_id:
                return product
        return None

    def get_garments(self):
        """Return every product dict in the catalog."""
        return [garment["product"] for garment in garments_json]


if __name__ == "__main__":
    llmr = LLMRecommender()

    def test(image_path=None):
        """Run a scripted shopping conversation, optionally with a photo login.

        When ``image_path`` is given, the user name is derived from the file
        name and the image is sent (base64-encoded) for gender detection.
        """
        base64_image = None
        name = "Sam Smith"
        if image_path:
            # e.g. "./data/Assets/models/female.png" -> "female"
            # (os.path is portable, unlike splitting on "/")
            name = os.path.splitext(os.path.basename(image_path))[0]
            with open(image_path, "rb") as image_file:
                base64_image = base64.b64encode(image_file.read()).decode("utf-8")

        llmr.login(name, base64_image)

        llmr.ainvoke("hi", lambda x: print(json.dumps(x, indent=4)))
        llmr.wait()

        response = llmr.invoke("party wear")
        print(json.dumps(response, indent=4))

        llmr.ainvoke("try 2", lambda x: print(json.dumps(x, indent=4)))
        llmr.wait()

        response = llmr.invoke("add 2 to cart")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("show more info on 8")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("I'd like to buy this")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("view cart")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("proceed to payment")
        print(json.dumps(response, indent=4))

        response = llmr.invoke("view cart")
        print(json.dumps(response, indent=4))

    start = time.time()
    # sys.argv, not the undocumented os.sys alias.
    argv = sys.argv
    if len(argv) > 1:
        test(argv[1])
    else:
        test("./data/Assets/models/female.png")
        test("./data/Assets/models/male.png")
        test()
    end = time.time()

    print(f"Duration: {end - start:.3f} seconds")