File size: 3,675 Bytes
d3fb87a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import datetime
from chat_anything.chatbot.personality import generate_personality_prompt
from langchain.prompts import PromptTemplate
from langchain import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
import os
import random
import string


def load_chain(llm, class_concept=None):
    """Build a ConversationChain driven by a generated personality prompt.

    Args:
        llm: Language model instance to back the chain. If falsy, nothing
            is built and ``(None, None, None)`` is returned.
        class_concept: Concept/persona to base the personality on; when
            None it defaults to ``'AI assistant'``.

    Returns:
        Tuple ``(chain, memory, personality_text)``: the conversation
        chain, its buffer memory, and the generated personality text —
        all None when no llm is supplied.
    """
    chain = None
    memory = None
    personality_text = None
    print(llm)
    if llm:
        print("class_concept", class_concept)
        if class_concept is None:
            class_concept = 'AI assistant'
        person_template, personality_text = generate_personality_prompt(llm, class_concept)

        PROMPT_TEMPLATE = PromptTemplate(
            input_variables=["history", "input"],
            template=person_template,
        )

        # BUG FIX: the memory object used to be created inline in the
        # ConversationChain call and was never bound to the returned
        # `memory` variable, so callers always received None for it.
        memory = ConversationBufferMemory(ai_prefix="You")
        chain = ConversationChain(
            prompt=PROMPT_TEMPLATE,
            llm=llm,
            verbose=False,
            memory=memory,
        )
        print("New concept done for ", class_concept)

    return chain, memory, personality_text



def _new_session_paths():
    """Create a random 5-char session uid and its temp video/audio file paths."""
    uid = ''.join(random.sample(string.ascii_lowercase + string.ascii_uppercase, 5))
    video_file_path = os.path.join('tmp', uid, 'videos/tempfile.mp4')
    audio_file_path = os.path.join('tmp', uid, 'audio/tempfile.mp3')
    return uid, video_file_path, audio_file_path


def set_openai_api_key(api_key, use_gpt4, history=None, max_tokens=1024, use_local_llm=True):
    """Set the api key and return chain.
    If no api_key, then None is returned.

    Args:
        api_key: OpenAI key; accepted only when it starts with ``"sk-"``
            and is longer than 50 characters.
        use_gpt4: Choose "gpt-4" over "gpt-3.5-turbo" (OpenAI path only).
        history: Optional chat history list; a [SYSTEM] notice is appended
            (the caller's list is mutated when one is passed in).
        max_tokens: Token limit handed to the model.
        use_local_llm: When the key is invalid/absent, fall back to a local
            OpenAI-compatible endpoint (key set to 'EMPTY') instead of failing.

    Returns:
        ``(llm, use_gpt4, history, uid, video_file_path, audio_file_path)``,
        or six Nones when no valid key is given and the local fallback is
        disabled.
    """
    if api_key and api_key.startswith("sk-") and len(api_key) > 50:
        os.environ["OPENAI_API_KEY"] = api_key
        print("\n\n ++++++++++++++ Setting OpenAI API key ++++++++++++++ \n\n")
        print(str(datetime.datetime.now()) + ": Before OpenAI, OPENAI_API_KEY length: " + str(
            len(os.environ["OPENAI_API_KEY"])))

        # The two branches previously duplicated the ChatOpenAI call;
        # only the model name differs.
        model_name = "gpt-4" if use_gpt4 else "gpt-3.5-turbo"
        print("Trying to use llm ChatOpenAI with " + model_name)
        llm = ChatOpenAI(temperature=0, max_tokens=max_tokens,
                         model_name=model_name)

        print(str(datetime.datetime.now()) + ": After OpenAI, OPENAI_API_KEY length: " + str(
            len(os.environ["OPENAI_API_KEY"])))
        # Clear the key from the environment once the client holds it.
        os.environ["OPENAI_API_KEY"] = ""
        history = history or []
        history.append(['', '[SYSTEM] OPENAI_API_KEY has been set, you can generate your object and talk to it now!'])
        uid, video_file_path, audio_file_path = _new_session_paths()
        return llm, use_gpt4, history, uid, video_file_path, audio_file_path

    if use_local_llm:
        # Local OpenAI-compatible server: the client library requires a
        # non-empty key, so a placeholder is used.
        os.environ["OPENAI_API_KEY"] = 'EMPTY'
        print("\n\n ++++++++++++++ Using local LLM  ++++++++++++++ \n\n")
        llm = ChatOpenAI(temperature=0, max_tokens=max_tokens,
                         model_name="gpt-3.5-turbo")
        history = history or []
        history.append(['', '[SYSTEM] You are using free local LLM, you can generate your object and talk to it now!'])
        uid, video_file_path, audio_file_path = _new_session_paths()
        return llm, use_gpt4, history, uid, video_file_path, audio_file_path

    return None, None, None, None, None, None