File size: 1,646 Bytes
56487d0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
# llm.py
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import GoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_community.chat_models import ChatOllama
from langchain_core.runnables import ConfigurableField
from langchain.callbacks.base import BaseCallbackHandler
class StreamCallback(BaseCallbackHandler):
    """LangChain callback handler that echoes streamed LLM tokens to stdout."""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Print each token as it arrives, without a newline, flushing so the
        # partial response is visible immediately in the terminal.
        print(token, end="", flush=True)
def get_llm(streaming=True):
    """Build a configurable chat model that defaults to GPT-4.

    The returned runnable exposes alternatives selectable at invocation
    time via ``config={"configurable": {"llm": "<key>"}}`` with one of:
    ``gpt4`` (default), ``claude``, ``gpt3``, ``gemini``, ``llama3``,
    or ``ollama``.

    Args:
        streaming: When True, tokens are emitted incrementally and echoed
            to stdout by ``StreamCallback``.

    Returns:
        A LangChain runnable with configurable model alternatives.
    """
    # StreamCallback holds no per-model state, so one shared instance is
    # safe for every alternative; avoids six duplicate constructions.
    callbacks = [StreamCallback()]
    return ChatOpenAI(
        model="gpt-4",
        temperature=0,
        streaming=streaming,
        callbacks=callbacks,
    ).configurable_alternatives(
        ConfigurableField(id="llm"),
        default_key="gpt4",
        claude=ChatAnthropic(
            model="claude-3-opus-20240229",
            temperature=0,
            streaming=streaming,
            callbacks=callbacks,
        ),
        gpt3=ChatOpenAI(
            model="gpt-3.5-turbo",
            temperature=0,
            streaming=streaming,
            callbacks=callbacks,
        ),
        gemini=GoogleGenerativeAI(
            model="gemini-1.5-flash",
            temperature=0,
            streaming=streaming,
            callbacks=callbacks,
        ),
        # ``model`` replaces the deprecated ``model_name`` alias, matching
        # the parameter used by every other constructor here.
        llama3=ChatGroq(
            model="llama3-70b-8192",
            temperature=0,
            streaming=streaming,
            callbacks=callbacks,
        ),
        # NOTE(review): no temperature is set for Ollama, unlike the other
        # alternatives — presumably intentional; confirm with the author.
        ollama=ChatOllama(
            model="EEVE-Korean-10.8B:long",
            streaming=streaming,
            callbacks=callbacks,
        ),
    )
|