Dataset columns:
- code: string, 141 to 78.9k characters
- apis: sequence of strings, 1 to 23 items
- extract_api: string, 142 to 73.2k characters

Each sample row below is listed in column order: the source file (code), the langchain APIs it uses (apis), and the serialized call-site extraction for those APIs (extract_api).
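The rows pair a full Python source file with the langchain symbols it imports and the locations where they are called. A minimal sketch of iterating such rows with the Hugging Face `datasets` library is shown below; the dataset path is a placeholder, not the real repository name.

```python
# Hypothetical loading sketch: replace "path/to/this-dataset" with the actual
# dataset repository or local data files.
from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train")

for row in ds.select(range(3)):
    print(len(row["code"]), "characters of source code")
    print("langchain APIs used:", row["apis"])
    # `extract_api` is a string holding per-call extraction records
    print(row["extract_api"][:120], "...")
```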
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This would makes sure Python is aware there is more than one sub-package within bigdl,
# physically located elsewhere.
# Otherwise there would be module not found error in non-pip's setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
# This file is adapted from
# https://github.com/hwchase17/langchain/blob/master/langchain/llms/huggingface_pipeline.py
# The MIT License
# Copyright (c) Harrison Chase
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import importlib.util
import logging
from typing import Any, List, Mapping, Optional

from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens

DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")


class TransformersPipelineLLM(LLM):
    """Wrapper around the BigDL-LLM Transformer-INT4 model in Transformer.pipeline()

    Example:
        .. code-block:: python

            from bigdl.llm.langchain.llms import TransformersPipelineLLM
            llm = TransformersPipelineLLM.from_model_id(model_id="decapoda-research/llama-7b-hf")
    """

    pipeline: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_ID
    """Model name or model path to use."""
    model_kwargs: Optional[dict] = None
    """Key word arguments passed to the model."""
    pipeline_kwargs: Optional[dict] = None
    """Key word arguments passed to the pipeline."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @classmethod
    def from_model_id(
        cls,
        model_id: str,
        task: str,
        model_kwargs: Optional[dict] = None,
        pipeline_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> LLM:
        """Construct the pipeline object from model_id and task."""
        try:
            from bigdl.llm.transformers import (
                AutoModel,
                AutoModelForCausalLM,
                # AutoModelForSeq2SeqLM,
            )
            from transformers import AutoTokenizer, LlamaTokenizer
            from transformers import pipeline as hf_pipeline
        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "Please install it with `pip install transformers`."
            )

        _model_kwargs = model_kwargs or {}
        # TODO: may refactore this code in the future
        try:
            tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
        except:
            tokenizer = LlamaTokenizer.from_pretrained(model_id, **_model_kwargs)

        try:
            if task == "text-generation":
                model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
            elif task in ("text2text-generation", "summarization"):
                # TODO: support this when related PR merged
                model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
            else:
                raise ValueError(
                    f"Got invalid task {task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
        except ImportError as e:
            raise ValueError(
                f"Could not load the {task} model due to missing dependencies."
            ) from e

        if "trust_remote_code" in _model_kwargs:
            _model_kwargs = {
                k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
            }
        _pipeline_kwargs = pipeline_kwargs or {}
        pipeline = hf_pipeline(
            task=task,
            model=model,
            tokenizer=tokenizer,
            device='cpu',  # only cpu now
            model_kwargs=_model_kwargs,
            **_pipeline_kwargs,
        )
        if pipeline.task not in VALID_TASKS:
            raise ValueError(
                f"Got invalid task {pipeline.task}, "
                f"currently only {VALID_TASKS} are supported"
            )
        return cls(
            pipeline=pipeline,
            model_id=model_id,
            model_kwargs=_model_kwargs,
            pipeline_kwargs=_pipeline_kwargs,
            **kwargs,
        )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_id": self.model_id,
            "model_kwargs": self.model_kwargs,
            "pipeline_kwargs": self.pipeline_kwargs,
        }

    @property
    def _llm_type(self) -> str:
        return "BigDL-llm"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        response = self.pipeline(prompt)
        if self.pipeline.task == "text-generation":
            # Text generation return includes the starter text.
            text = response[0]["generated_text"][len(prompt) :]
        elif self.pipeline.task == "text2text-generation":
            text = response[0]["generated_text"]
        elif self.pipeline.task == "summarization":
            text = response[0]["summary_text"]
        else:
            raise ValueError(
                f"Got invalid task {self.pipeline.task}, "
                f"currently only {VALID_TASKS} are supported"
            )
        if stop is not None:
            # This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to huggingface_hub.
            text = enforce_stop_tokens(text, stop)
        return text
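The class above exposes `from_model_id(model_id, task)` as its constructor. A hedged usage sketch follows, wiring it into a plain LangChain `LLMChain`; the model id comes from the docstring above, and the prompt and question are illustrative only.

```python
# Sketch only: assumes bigdl-llm and langchain are installed and the model can
# be downloaded. The prompt and question are made-up examples.
from langchain import LLMChain, PromptTemplate

from bigdl.llm.langchain.llms import TransformersPipelineLLM

llm = TransformersPipelineLLM.from_model_id(
    model_id="decapoda-research/llama-7b-hf",  # example model from the docstring
    task="text-generation",
)
prompt = PromptTemplate(
    template="Question: {question}\nAnswer:", input_variables=["question"]
)
chain = LLMChain(prompt=prompt, llm=llm)
print(chain.run("What is the capital of France?"))
```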
[ "langchain.llms.utils.enforce_stop_tokens" ]
[((5354, 5476), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': '"""cpu"""', 'model_kwargs': '_model_kwargs'}), "(task=task, model=model, tokenizer=tokenizer, device='cpu',\n model_kwargs=_model_kwargs, **_pipeline_kwargs)\n", (5365, 5476), True, 'from transformers import pipeline as hf_pipeline\n'), ((4206, 4262), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4235, 4262), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((7329, 7360), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (7348, 7360), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((4303, 4360), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4333, 4360), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((4441, 4528), 'bigdl.llm.transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'load_in_4bit': '(True)'}), '(model_id, load_in_4bit=True, **\n _model_kwargs)\n', (4477, 4528), False, 'from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM\n')]
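The `extract_api` value above is the repr of a Python list of call records, so it can be parsed back into structured data with `ast.literal_eval`. The sketch below uses one of the records shown above (shortened); the meaning assigned to the tuple elements (character span, qualified name, local binding, arguments, call text, name span, and originating import) is inferred from the visible rows.

```python
import ast

# One record from the `extract_api` column above, lightly shortened.
raw = ("[((4206, 4262), 'transformers.AutoTokenizer.from_pretrained', "
       "'AutoTokenizer.from_pretrained', (['model_id'], {}), "
       "'(model_id, **_model_kwargs)', (4235, 4262), False, "
       "'from transformers import AutoTokenizer, LlamaTokenizer')]")

records = ast.literal_eval(raw)  # every element is a Python literal
for rec in records:
    (start, end), qualified_name, local_name = rec[0], rec[1], rec[2]
    import_stmt = rec[-1]
    print(f"{qualified_name} (bound as {local_name}) spans chars {start}-{end}")
    print("  imported via:", import_stmt)
```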
from typing import AsyncGenerator, Optional, Tuple

from langchain import ConversationChain
import logging
from typing import Optional, Tuple

from pydantic.v1 import SecretStr

from vocode.streaming.agent.base_agent import RespondAgent
from vocode.streaming.agent.utils import get_sentence_from_buffer
from langchain import ConversationChain
from langchain.schema import ChatMessage, AIMessage, HumanMessage
from langchain_community.chat_models import ChatAnthropic
import logging
from vocode import getenv
from vocode.streaming.models.agent import ChatAnthropicAgentConfig
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate,
)
from vocode import getenv
from vocode.streaming.models.agent import ChatAnthropicAgentConfig
from langchain.memory import ConversationBufferMemory

SENTENCE_ENDINGS = [".", "!", "?"]


class ChatAnthropicAgent(RespondAgent[ChatAnthropicAgentConfig]):
    def __init__(
        self,
        agent_config: ChatAnthropicAgentConfig,
        logger: Optional[logging.Logger] = None,
        anthropic_api_key: Optional[SecretStr] = None,
    ):
        super().__init__(agent_config=agent_config, logger=logger)
        import anthropic

        # Convert anthropic_api_key to SecretStr if it's not None and not already a SecretStr
        if anthropic_api_key is not None and not isinstance(
            anthropic_api_key, SecretStr
        ):
            anthropic_api_key = SecretStr(anthropic_api_key)
        else:
            # Retrieve anthropic_api_key from environment variable and convert to SecretStr
            env_key = getenv("ANTHROPIC_API_KEY")
            if env_key:
                anthropic_api_key = SecretStr(env_key)
        if not anthropic_api_key:
            raise ValueError(
                "ANTHROPIC_API_KEY must be set in environment or passed in as a SecretStr"
            )
        self.prompt = ChatPromptTemplate.from_messages(
            [
                MessagesPlaceholder(variable_name="history"),
                HumanMessagePromptTemplate.from_template("{input}"),
            ]
        )
        self.llm = ChatAnthropic(
            model_name=agent_config.model_name,
            anthropic_api_key=anthropic_api_key,
        )
        # streaming not well supported by langchain, so we will connect directly
        self.anthropic_client = (
            anthropic.AsyncAnthropic(api_key=str(anthropic_api_key))
            if agent_config.generate_responses
            else None
        )
        self.memory = ConversationBufferMemory(return_messages=True)
        self.memory.chat_memory.messages.append(
            HumanMessage(content=self.agent_config.prompt_preamble)
        )
        if agent_config.initial_message:
            self.memory.chat_memory.messages.append(
                AIMessage(content=agent_config.initial_message.text)
            )
        self.conversation = ConversationChain(
            memory=self.memory, prompt=self.prompt, llm=self.llm
        )

    async def respond(
        self,
        human_input,
        conversation_id: str,
        is_interrupt: bool = False,
    ) -> Tuple[str, bool]:
        text = await self.conversation.apredict(input=human_input)
        self.logger.debug(f"LLM response: {text}")
        return text, False

    async def generate_response(
        self,
        human_input,
        conversation_id: str,
        is_interrupt: bool = False,
    ) -> AsyncGenerator[Tuple[str, bool], None]:
        self.memory.chat_memory.messages.append(HumanMessage(content=human_input))
        bot_memory_message = AIMessage(content="")
        self.memory.chat_memory.messages.append(bot_memory_message)
        prompt = self.llm._convert_messages_to_prompt(self.memory.chat_memory.messages)
        if self.anthropic_client:
            streamed_response = await self.anthropic_client.completions.create(
                prompt=prompt,
                max_tokens_to_sample=self.agent_config.max_tokens_to_sample,
                model=self.agent_config.model_name,
                stream=True,
            )
            buffer = ""
            async for completion in streamed_response:
                buffer += completion.completion
                sentence, remainder = get_sentence_from_buffer(buffer)
                if sentence:
                    bot_memory_message.content = bot_memory_message.content + sentence
                    buffer = remainder
                    yield sentence, True
                continue

    def update_last_bot_message_on_cut_off(self, message: str):
        for memory_message in self.memory.chat_memory.messages[::-1]:
            if (
                isinstance(memory_message, ChatMessage)
                and memory_message.role == "assistant"
            ) or isinstance(memory_message, AIMessage):
                memory_message.content = message
                return
[ "langchain_community.chat_models.ChatAnthropic", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.memory.ConversationBufferMemory", "langchain.prompts.MessagesPlaceholder", "langchain.schema.HumanMessage", "langchain.schema.AIMessage", "langchain.ConversationChain" ]
[((2147, 2238), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model_name': 'agent_config.model_name', 'anthropic_api_key': 'anthropic_api_key'}), '(model_name=agent_config.model_name, anthropic_api_key=\n anthropic_api_key)\n', (2160, 2238), False, 'from langchain_community.chat_models import ChatAnthropic\n'), ((2556, 2602), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (2580, 2602), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2936, 3007), 'langchain.ConversationChain', 'ConversationChain', ([], {'memory': 'self.memory', 'prompt': 'self.prompt', 'llm': 'self.llm'}), '(memory=self.memory, prompt=self.prompt, llm=self.llm)\n', (2953, 3007), False, 'from langchain import ConversationChain\n'), ((3624, 3645), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '""""""'}), "(content='')\n", (3633, 3645), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1468, 1496), 'pydantic.v1.SecretStr', 'SecretStr', (['anthropic_api_key'], {}), '(anthropic_api_key)\n', (1477, 1496), False, 'from pydantic.v1 import SecretStr\n'), ((1625, 1652), 'vocode.getenv', 'getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (1631, 1652), False, 'from vocode import getenv\n'), ((2664, 2719), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'self.agent_config.prompt_preamble'}), '(content=self.agent_config.prompt_preamble)\n', (2676, 2719), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((3559, 3592), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_input'}), '(content=human_input)\n', (3571, 3592), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1713, 1731), 'pydantic.v1.SecretStr', 'SecretStr', (['env_key'], {}), '(env_key)\n', (1722, 1731), False, 'from pydantic.v1 import SecretStr\n'), ((1988, 2032), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (2007, 2032), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2050, 2101), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (2090, 2101), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2840, 2892), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'agent_config.initial_message.text'}), '(content=agent_config.initial_message.text)\n', (2849, 2892), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((4286, 4318), 'vocode.streaming.agent.utils.get_sentence_from_buffer', 'get_sentence_from_buffer', (['buffer'], {}), '(buffer)\n', (4310, 4318), False, 'from vocode.streaming.agent.utils import get_sentence_from_buffer\n')]
import os

from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
import textwrap

# --------------------------------------------------------------
# Load the HuggingFaceHub API token from the .env file
# --------------------------------------------------------------
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

# --------------------------------------------------------------
# Load the LLM model from the HuggingFaceHub
# --------------------------------------------------------------
repo_id = "tiiuae/falcon-7b-instruct"  # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
falcon_llm = HuggingFaceHub(
    repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
)

# --------------------------------------------------------------
# Create a PromptTemplate and LLMChain
# --------------------------------------------------------------
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=falcon_llm)

# --------------------------------------------------------------
# Run the LLMChain
# --------------------------------------------------------------
question = "How do I make a sandwich?"
response = llm_chain.run(question)
wrapped_text = textwrap.fill(
    response, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)

# --------------------------------------------------------------
# Load a video transcript from YouTube
# --------------------------------------------------------------
video_url = "https://www.youtube.com/watch?v=riXpu1tHzl0"
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000)
docs = text_splitter.split_documents(transcript)

# --------------------------------------------------------------
# Summarization with LangChain
# --------------------------------------------------------------
# Add map_prompt and combine_prompt to the chain for custom summarization
chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True)
print(chain.llm_chain.prompt.template)
print(chain.combine_document_chain.llm_chain.prompt.template)

# --------------------------------------------------------------
# Test the Falcon model with text summarization
# --------------------------------------------------------------
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
    output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)

# --------------------------------------------------------------
# Load an OpenAI model for comparison
# --------------------------------------------------------------
openai_llm = OpenAI(
    model_name="text-davinci-003", temperature=0.1, max_tokens=500
)  # max token length is 4097
chain = load_summarize_chain(openai_llm, chain_type="map_reduce", verbose=True)
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
    output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
[ "langchain.chains.summarize.load_summarize_chain", "langchain.LLMChain", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.OpenAI", "langchain.document_loaders.YoutubeLoader.from_youtube_url", "langchain.HuggingFaceHub", "langchain.PromptTemplate" ]
[((955, 1048), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 500}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'max_new_tokens': 500})\n", (969, 1048), False, 'from langchain import HuggingFaceHub\n'), ((1305, 1368), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question']"}), "(template=template, input_variables=['question'])\n", (1319, 1368), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1381, 1420), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'falcon_llm'}), '(prompt=prompt, llm=falcon_llm)\n', (1389, 1420), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1662, 1750), 'textwrap.fill', 'textwrap.fill', (['response'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(response, width=100, break_long_words=False,\n replace_whitespace=False)\n', (1675, 1750), False, 'import textwrap\n'), ((2012, 2053), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['video_url'], {}), '(video_url)\n', (2042, 2053), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((2098, 2145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(3000)'}), '(chunk_size=3000)\n', (2128, 2145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2440, 2511), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['falcon_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(falcon_llm, chain_type='map_reduce', verbose=True)\n", (2460, 2511), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2841, 2935), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (2854, 2935), False, 'import textwrap\n'), ((3142, 3212), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-003"""', 'temperature': '(0.1)', 'max_tokens': '(500)'}), "(model_name='text-davinci-003', temperature=0.1, max_tokens=500)\n", (3148, 3212), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((3255, 3326), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['openai_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(openai_llm, chain_type='map_reduce', verbose=True)\n", (3275, 3326), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((3375, 3469), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (3388, 3469), False, 'import textwrap\n'), ((541, 554), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (552, 554), False, 'from dotenv import load_dotenv, find_dotenv\n')]
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#

import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple

import dpath.util
from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel
from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier
from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode
from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType
from langchain.document_loaders.base import Document
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.utils import stringify_dict

METADATA_STREAM_FIELD = "_ab_stream"
METADATA_RECORD_ID_FIELD = "_ab_record_id"
CDC_DELETED_FIELD = "_ab_cdc_deleted_at"


@dataclass
class Chunk:
    page_content: Optional[str]
    metadata: Dict[str, Any]
    record: AirbyteRecordMessage
    embedding: Optional[List[float]] = None


headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "]


class DocumentProcessor:
    """
    DocumentProcessor is a helper class that generates documents from Airbyte records.

    It is used to generate documents from records before writing them to the destination:
    * The text fields are extracted from the record and concatenated to a single string.
    * The metadata fields are extracted from the record and added to the document metadata.
    * The document is split into chunks of a given size using a langchain text splitter.

    The Writer class uses the DocumentProcessor class to internally generate documents from records -
    in most cases you don't need to use it directly, except if you want to implement a custom writer.

    The config parameters specified by the ProcessingConfigModel has to be made part of the connector spec
    to allow the user to configure the document processor.
    Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid.
    """

    streams: Mapping[str, ConfiguredAirbyteStream]

    @staticmethod
    def check_config(config: ProcessingConfigModel) -> Optional[str]:
        if config.text_splitter is not None and config.text_splitter.mode == "separator":
            for s in config.text_splitter.separators:
                try:
                    separator = json.loads(s)
                    if not isinstance(separator, str):
                        return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
                except json.decoder.JSONDecodeError:
                    return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
        return None

    def _get_text_splitter(
        self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel]
    ) -> RecursiveCharacterTextSplitter:
        if splitter_config is None:
            splitter_config = SeparatorSplitterConfigModel(mode="separator")
        if splitter_config.mode == "separator":
            return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=[json.loads(s) for s in splitter_config.separators],
                keep_separator=splitter_config.keep_separator,
                disallowed_special=(),
            )
        if splitter_config.mode == "markdown":
            return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=headers_to_split_on[: splitter_config.split_level],
                is_separator_regex=True,
                keep_separator=True,
                disallowed_special=(),
            )
        if splitter_config.mode == "code":
            return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)),
                disallowed_special=(),
            )

    def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog):
        self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams}
        self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter)
        self.text_fields = config.text_fields
        self.metadata_fields = config.metadata_fields
        self.field_name_mappings = config.field_name_mappings
        self.logger = logging.getLogger("airbyte.document_processor")

    def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]:
        """
        Generate documents from records.
        :param records: List of AirbyteRecordMessages
        :return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store)
        """
        if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]:
            return [], self._extract_primary_key(record)
        doc = self._generate_document(record)
        if doc is None:
            text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields"
            raise AirbyteTracedException(
                internal_message="No text fields found in record",
                message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. Please check your processing configuration, there has to be at least one text field set in each record.",
                failure_type=FailureType.config_error,
            )
        chunks = [
            Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record)
            for chunk_document in self._split_document(doc)
        ]
        id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None
        return chunks, id_to_delete

    def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]:
        relevant_fields = self._extract_relevant_fields(record, self.text_fields)
        if len(relevant_fields) == 0:
            return None
        text = stringify_dict(relevant_fields)
        metadata = self._extract_metadata(record)
        return Document(page_content=text, metadata=metadata)

    def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]:
        relevant_fields = {}
        if fields and len(fields) > 0:
            for field in fields:
                values = dpath.util.values(record.data, field, separator=".")
                if values and len(values) > 0:
                    relevant_fields[field] = values if len(values) > 1 else values[0]
        else:
            relevant_fields = record.data
        return self._remap_field_names(relevant_fields)

    def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]:
        metadata = self._extract_relevant_fields(record, self.metadata_fields)
        metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record)
        primary_key = self._extract_primary_key(record)
        if primary_key:
            metadata[METADATA_RECORD_ID_FIELD] = primary_key
        return metadata

    def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]:
        stream_identifier = create_stream_identifier(record)
        current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier]
        # if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones
        if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup:
            return None
        primary_key = []
        for key in current_stream.primary_key:
            try:
                primary_key.append(str(dpath.util.get(record.data, key)))
            except KeyError:
                primary_key.append("__not_found__")
        stringified_primary_key = "_".join(primary_key)
        return f"{stream_identifier}_{stringified_primary_key}"

    def _split_document(self, doc: Document) -> List[Document]:
        chunks: List[Document] = self.splitter.split_documents([doc])
        return chunks

    def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]:
        if not self.field_name_mappings:
            return fields
        new_fields = fields.copy()
        for mapping in self.field_name_mappings:
            if mapping.from_field in new_fields:
                new_fields[mapping.to_field] = new_fields.pop(mapping.from_field)
        return new_fields
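The docstring above describes the record-to-chunks flow: stringify the selected fields, wrap them in a langchain `Document`, and split with a tiktoken-based splitter. The sketch below isolates that core step using the same langchain calls as the class, without the Airbyte config/catalog plumbing; the sample record, metadata, and chunk sizes are illustrative only.

```python
# Minimal sketch of the chunking step, assuming langchain and tiktoken are
# installed. Not the full Airbyte DocumentProcessor wiring.
from langchain.document_loaders.base import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.utils import stringify_dict

record_data = {"title": "Hello", "body": "Some longer text that should be chunked..."}

# Concatenate the record's fields into one string, as _generate_document does.
text = stringify_dict(record_data)
doc = Document(page_content=text, metadata={"_ab_stream": "my_stream"})

# Token-length-based splitter, mirroring how _get_text_splitter builds one.
splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=150, chunk_overlap=30, disallowed_special=()
)
for chunk in splitter.split_documents([doc]):
    print(len(chunk.page_content), chunk.metadata)
```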
[ "langchain.document_loaders.base.Document", "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.text_splitter.Language", "langchain.utils.stringify_dict" ]
[((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')]
# # Copyright (c) 2023 Airbyte, Inc., all rights reserved. # import json import logging from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional, Tuple import dpath.util from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType from langchain.document_loaders.base import Document from langchain.text_splitter import Language, RecursiveCharacterTextSplitter from langchain.utils import stringify_dict METADATA_STREAM_FIELD = "_ab_stream" METADATA_RECORD_ID_FIELD = "_ab_record_id" CDC_DELETED_FIELD = "_ab_cdc_deleted_at" @dataclass class Chunk: page_content: Optional[str] metadata: Dict[str, Any] record: AirbyteRecordMessage embedding: Optional[List[float]] = None headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "] class DocumentProcessor: """ DocumentProcessor is a helper class that generates documents from Airbyte records. It is used to generate documents from records before writing them to the destination: * The text fields are extracted from the record and concatenated to a single string. * The metadata fields are extracted from the record and added to the document metadata. * The document is split into chunks of a given size using a langchain text splitter. The Writer class uses the DocumentProcessor class to internally generate documents from records - in most cases you don't need to use it directly, except if you want to implement a custom writer. The config parameters specified by the ProcessingConfigModel has to be made part of the connector spec to allow the user to configure the document processor. Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid. """ streams: Mapping[str, ConfiguredAirbyteStream] @staticmethod def check_config(config: ProcessingConfigModel) -> Optional[str]: if config.text_splitter is not None and config.text_splitter.mode == "separator": for s in config.text_splitter.separators: try: separator = json.loads(s) if not isinstance(separator, str): return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes." except json.decoder.JSONDecodeError: return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes." 
return None def _get_text_splitter( self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel] ) -> RecursiveCharacterTextSplitter: if splitter_config is None: splitter_config = SeparatorSplitterConfigModel(mode="separator") if splitter_config.mode == "separator": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=[json.loads(s) for s in splitter_config.separators], keep_separator=splitter_config.keep_separator, disallowed_special=(), ) if splitter_config.mode == "markdown": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=headers_to_split_on[: splitter_config.split_level], is_separator_regex=True, keep_separator=True, disallowed_special=(), ) if splitter_config.mode == "code": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)), disallowed_special=(), ) def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog): self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams} self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter) self.text_fields = config.text_fields self.metadata_fields = config.metadata_fields self.field_name_mappings = config.field_name_mappings self.logger = logging.getLogger("airbyte.document_processor") def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]: """ Generate documents from records. :param records: List of AirbyteRecordMessages :return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store) """ if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]: return [], self._extract_primary_key(record) doc = self._generate_document(record) if doc is None: text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields" raise AirbyteTracedException( internal_message="No text fields found in record", message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. 
Please check your processing configuration, there has to be at least one text field set in each record.", failure_type=FailureType.config_error, ) chunks = [ Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record) for chunk_document in self._split_document(doc) ] id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None return chunks, id_to_delete def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]: relevant_fields = self._extract_relevant_fields(record, self.text_fields) if len(relevant_fields) == 0: return None text = stringify_dict(relevant_fields) metadata = self._extract_metadata(record) return Document(page_content=text, metadata=metadata) def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]: relevant_fields = {} if fields and len(fields) > 0: for field in fields: values = dpath.util.values(record.data, field, separator=".") if values and len(values) > 0: relevant_fields[field] = values if len(values) > 1 else values[0] else: relevant_fields = record.data return self._remap_field_names(relevant_fields) def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]: metadata = self._extract_relevant_fields(record, self.metadata_fields) metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record) primary_key = self._extract_primary_key(record) if primary_key: metadata[METADATA_RECORD_ID_FIELD] = primary_key return metadata def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]: stream_identifier = create_stream_identifier(record) current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier] # if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup: return None primary_key = [] for key in current_stream.primary_key: try: primary_key.append(str(dpath.util.get(record.data, key))) except KeyError: primary_key.append("__not_found__") stringified_primary_key = "_".join(primary_key) return f"{stream_identifier}_{stringified_primary_key}" def _split_document(self, doc: Document) -> List[Document]: chunks: List[Document] = self.splitter.split_documents([doc]) return chunks def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]: if not self.field_name_mappings: return fields new_fields = fields.copy() for mapping in self.field_name_mappings: if mapping.from_field in new_fields: new_fields[mapping.to_field] = new_fields.pop(mapping.from_field) return new_fields
[ "langchain.document_loaders.base.Document", "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.text_splitter.Language", "langchain.utils.stringify_dict" ]
[((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')]
from waifu.llm.Brain import Brain
from waifu.llm.VectorDB import VectorDB
from waifu.llm.SentenceTransformer import STEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from typing import Any, List, Mapping, Optional
from langchain.schema import BaseMessage
import openai


class GPT(Brain):
    def __init__(self, api_key: str,
                 name: str,
                 stream: bool=False,
                 callback=None,
                 model: str='gpt-3.5-turbo',
                 proxy: str=''):
        self.llm = ChatOpenAI(openai_api_key=api_key,
                              model_name=model,
                              streaming=stream,
                              callbacks=[callback],
                              temperature=0.85)
        self.llm_nonstream = ChatOpenAI(openai_api_key=api_key, model_name=model)
        self.embedding = OpenAIEmbeddings(openai_api_key=api_key)
        # self.embedding = STEmbedding()
        self.vectordb = VectorDB(self.embedding, f'./memory/{name}.csv')
        if proxy != '':
            openai.proxy = proxy

    def think(self, messages: List[BaseMessage]):
        return self.llm(messages).content

    def think_nonstream(self, messages: List[BaseMessage]):
        return self.llm_nonstream(messages).content

    def store_memory(self, text: str | list):
        '''Store memory (text embeddings)'''
        self.vectordb.store(text)

    def extract_memory(self, text: str, top_n: int = 10):
        '''Retrieve the top_n most relevant memories'''
        return self.vectordb.query(text, top_n)
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.chat_models.ChatOpenAI" ]
[((576, 690), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model', 'streaming': 'stream', 'callbacks': '[callback]', 'temperature': '(0.85)'}), '(openai_api_key=api_key, model_name=model, streaming=stream,\n callbacks=[callback], temperature=0.85)\n', (586, 690), False, 'from langchain.chat_models import ChatOpenAI\n'), ((812, 864), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model'}), '(openai_api_key=api_key, model_name=model)\n', (822, 864), False, 'from langchain.chat_models import ChatOpenAI\n'), ((890, 930), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (906, 930), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((996, 1044), 'waifu.llm.VectorDB.VectorDB', 'VectorDB', (['self.embedding', 'f"""./memory/{name}.csv"""'], {}), "(self.embedding, f'./memory/{name}.csv')\n", (1004, 1044), False, 'from waifu.llm.VectorDB import VectorDB\n')]
from time import sleep import copy import redis import json import pickle import traceback from flask import Response, request, stream_with_context from typing import Dict, Union import os from langchain.schema import HumanMessage, SystemMessage from backend.api.language_model import get_llm from backend.main import app, message_id_register, message_pool, logger from backend.utils.streaming import single_round_chat_with_agent_streaming from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL from backend.schemas import DEFAULT_USER_ID from real_agents.adapters.llm import BaseLanguageModel from real_agents.adapters.agent_helpers import AgentExecutor, Tool from real_agents.adapters.callbacks.agent_streaming import \ AgentStreamingStdOutCallbackHandler from real_agents.adapters.models import ChatOpenAI from real_agents.adapters.memory import ConversationReActBufferMemory from real_agents.adapters.data_model import DataModel, JsonDataModel from real_agents.adapters.interactive_executor import initialize_webot_agent from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed # here webot and webot_status are stored in redis since the two global variable can not be modified and accessed normally in multiprocess # fixme:now webot is stored without message_id or chat_id info, so it can only be used for one chat at a time # fixme:now webot_status is stored with chat_id info, if the status is not reset after a message ended abnormally e.g. the message is interrupted, it will be reused wrongly for the next chat def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor: data = r.get(f'webot_{user_id}_{chat_id}') if data is not None: webot = pickle.loads(data) else: # initialize a webot with None instrucition if webot does not exist webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) return webot def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ): r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot)) def get_webot_status_from_redis(user_id: str, chat_id: str): webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}') if webot_status_json is not None: webot_status = json.loads(webot_status_json) return webot_status else: return {} def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict): r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status)) def reset_webot(user_id: str, chat_id: str): webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) def reset_webot_status(user_id: str, chat_id: str): webot_status = {"webot_status": "idle", "url": None} save_webot_status_to_redis(user_id, chat_id, webot_status) # this function has been deprecated def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI): # fixme: Move this into a separate chain or executors to decompose the LLMs system_message = f""" You are a planner to assist another browser automation assistant. Here is the instruction for the other assistant: ``` You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist: 1. click(element): Clicks on an element 2. setValue(element, value: string): Focuses on and sets the value of an input element 3. finish(): Indicates the task is finished 4. fail(): Indicates that you are unable to complete the task You will be be given a task to perform and the current state of the DOM. 
You will also be given previous actions that you have taken. You may retry a failed action up to one time. This is an example of an action: <Thought>I should click the add to cart button</Thought> <Action>click(223)</Action> You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid. Rules you MUST follow: 1. You must only take one step at a time. You cannot take multiple actions in a single response. 2. You should not consider the action to present the result to the user. You only need to do available actions. If info in current page is enough for the user to solve the problem, you should finish. ``` Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks. """.strip() human_message = f""" The user requests the following task: {instruction} Now you are at {start_url} Provide a plan to do this (you can use pseudo description as below to describe the item). Here is an example case: request: Go to google calendar to schedule a meeting current url: "https://google.com" example plan: 1. setValue(searchBar, "google calendar") 2. click(search) 3. click(the item with title of google calendar) 4.1 if user has loginned do nothing 4.2 if user hasn't loginned do login 5. click(create event button) 6. setValue(event title input bar, "meeting") 7. click(save event button) 8. finish() """.strip() messages = [SystemMessage(content=system_message), HumanMessage(content=human_message)] response = chat_llm(messages).content return response def create_webot_interaction_executor( llm: BaseLanguageModel, llm_name: str, user_id: str, chat_id: str ) -> AgentExecutor: """Creates an agent executor for interaction. Args: llm: A llm model. llm_name: A string llm name. user_id: A string of user id. chat_id: A string chat id. Returns: An agent executor. 
""" # Initialize memory memory = ConversationReActBufferMemory(memory_key="chat_history", return_messages=True, max_token_limit=10000) class RunWebot: def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str, chat_id: str): self.llm = llm self.webot = webot self.user_id = user_id self.chat_id = chat_id def run(self, term: str) -> Union[str, Dict, DataModel]: try: user_id = self.user_id chat_id = self.chat_id reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) raw_observation = self.webot.run(user_intent=term, llm=self.llm) instruction, start_url = raw_observation["instruction"], \ raw_observation["start_url"] webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = instruction # webot.plan = get_plan(instruction, start_url) webot.plan = "" save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) webot_status = { "webot_status": "running", "url": start_url } save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status=webot_status) while True: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) if webot.finish or webot.interrupt or webot.error or webot.fail: break else: sleep(0.5) save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status={"webot_status": "idle", "url": None}) webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = None save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) if webot.finish: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) action_history = webot.action_history last_page = webot.pages_viewed[-1] observation = JsonDataModel.from_raw_data( { "success": True, "result": json.dumps({"action_history": action_history, "last_page": last_page}, indent=4), "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.fail: observation = JsonDataModel.from_raw_data( { "success": True, "result": "The webot failed to execute the instruction.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.interrupt: observation = JsonDataModel.from_raw_data( { "success": False, "result": "The web browsing is interrupted by user.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.error: observation = JsonDataModel.from_raw_data( { "success": False, "result": "Error occurs during web browsing.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation except Exception as e: print(traceback.format_exc()) observation = JsonDataModel.from_raw_data( { "success": False, "result": f"Failed in web browsing with the input: {term}, please try again later.", "intermediate_steps": json.dumps({"error": str(e)}) } ) return observation webot = WebotExecutor.from_webot() llm = copy.deepcopy(llm) run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id) tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)] continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None interaction_executor = initialize_webot_agent( tools, llm, continue_model, memory=memory, verbose=True ) return interaction_executor @app.route("/api/chat_xlang_webot", methods=["POST"]) def chat_xlang_webot() -> Dict: """Returns the chat response of web agent.""" try: # Get request parameters request_json = 
request.get_json() user_id = request_json.pop("user_id", DEFAULT_USER_ID) chat_id = request_json["chat_id"] user_intent = request_json["user_intent"] parent_message_id = request_json["parent_message_id"] llm_name = request_json["llm_name"] temperature = request_json.get("temperature", 0.4) stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"] kwargs = { "temperature": temperature, "stop": stop_words, } # Get language model llm = get_llm(llm_name, **kwargs) logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="Request json").debug(request_json) human_message_id = message_id_register.add_variable(user_intent) ai_message_id = message_id_register.add_variable("") stream_handler = AgentStreamingStdOutCallbackHandler() # Build executor and run chat # reset webot and status reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) interaction_executor = create_webot_interaction_executor( llm=llm, llm_name=llm_name, chat_id=chat_id, user_id=user_id ) activated_message_list = message_pool.get_activated_message_list(user_id, chat_id, list(), parent_message_id) message_pool.load_agent_memory_from_list(interaction_executor.memory, activated_message_list) return stream_with_context( Response( single_round_chat_with_agent_streaming( interaction_executor=interaction_executor, user_intent=user_intent, human_message_id=human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id=chat_id, message_list=activated_message_list, parent_message_id=parent_message_id, stream_handler=stream_handler, llm_name=llm_name, app_type="webot", ), content_type="application/json", ) ) except Exception as e: import traceback traceback.print_exc() return Response(response=None, status=f"{OVERLOAD} backend is currently overloaded")
[ "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (2683, 2689), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((5769, 5875), 'real_agents.adapters.memory.ConversationReActBufferMemory', 'ConversationReActBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(10000)'}), "(memory_key='chat_history', return_messages=\n True, max_token_limit=10000)\n", (5798, 5875), False, 'from real_agents.adapters.memory import ConversationReActBufferMemory\n'), ((10859, 10885), 'real_agents.web_agent.WebotExecutor.from_webot', 'WebotExecutor.from_webot', ([], {}), '()\n', (10883, 10885), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((10896, 10914), 'copy.deepcopy', 'copy.deepcopy', (['llm'], {}), '(llm)\n', (10909, 10914), False, 'import copy\n'), ((11176, 11255), 'real_agents.adapters.interactive_executor.initialize_webot_agent', 'initialize_webot_agent', (['tools', 'llm', 'continue_model'], {'memory': 'memory', 'verbose': '(True)'}), '(tools, llm, continue_model, memory=memory, verbose=True)\n', (11198, 11255), False, 'from real_agents.adapters.interactive_executor import initialize_webot_agent\n'), ((1125, 1150), 'os.getenv', 'os.getenv', (['"""REDIS_SERVER"""'], {}), "('REDIS_SERVER')\n", (1134, 1150), False, 'import os\n'), ((1810, 1828), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1822, 1828), False, 'import pickle\n'), ((1931, 1956), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (1950, 1956), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((2152, 2171), 'pickle.dumps', 'pickle.dumps', (['webot'], {}), '(webot)\n', (2164, 2171), False, 'import pickle\n'), ((2364, 2393), 'json.loads', 'json.loads', (['webot_status_json'], {}), '(webot_status_json)\n', (2374, 2393), False, 'import json\n'), ((2579, 2603), 'json.dumps', 'json.dumps', (['webot_status'], {}), '(webot_status)\n', (2589, 2603), False, 'import json\n'), ((5172, 5209), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5185, 5209), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5227, 5262), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message'}), '(content=human_message)\n', (5239, 5262), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((10999, 11071), 'real_agents.adapters.agent_helpers.Tool', 'Tool', ([], {'name': 'webot.name', 'func': 'run_webot.run', 'description': 'webot.description'}), '(name=webot.name, func=run_webot.run, description=webot.description)\n', (11003, 11071), False, 'from real_agents.adapters.agent_helpers import AgentExecutor, Tool\n'), ((11505, 11523), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11521, 11523), False, 'from flask import Response, request, stream_with_context\n'), ((12048, 12075), 'backend.api.language_model.get_llm', 'get_llm', (['llm_name'], {}), '(llm_name, **kwargs)\n', (12055, 12075), False, 'from backend.api.language_model import get_llm\n'), ((12237, 12282), 
'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['user_intent'], {}), '(user_intent)\n', (12269, 12282), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12307, 12343), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['""""""'], {}), "('')\n", (12339, 12343), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12370, 12407), 'real_agents.adapters.callbacks.agent_streaming.AgentStreamingStdOutCallbackHandler', 'AgentStreamingStdOutCallbackHandler', ([], {}), '()\n', (12405, 12407), False, 'from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler\n'), ((13127, 13224), 'backend.main.message_pool.load_agent_memory_from_list', 'message_pool.load_agent_memory_from_list', (['interaction_executor.memory', 'activated_message_list'], {}), '(interaction_executor.memory,\n activated_message_list)\n', (13167, 13224), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((14066, 14087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14085, 14087), False, 'import traceback\n'), ((14103, 14180), 'flask.Response', 'Response', ([], {'response': 'None', 'status': 'f"""{OVERLOAD} backend is currently overloaded"""'}), "(response=None, status=f'{OVERLOAD} backend is currently overloaded')\n", (14111, 14180), False, 'from flask import Response, request, stream_with_context\n'), ((12085, 12173), 'backend.main.logger.bind', 'logger.bind', ([], {'user_id': 'user_id', 'chat_id': 'chat_id', 'api': '"""/chat"""', 'msg_head': '"""Request json"""'}), "(user_id=user_id, chat_id=chat_id, api='/chat', msg_head=\n 'Request json')\n", (12096, 12173), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((13344, 13714), 'backend.utils.streaming.single_round_chat_with_agent_streaming', 'single_round_chat_with_agent_streaming', ([], {'interaction_executor': 'interaction_executor', 'user_intent': 'user_intent', 'human_message_id': 'human_message_id', 'ai_message_id': 'ai_message_id', 'user_id': 'user_id', 'chat_id': 'chat_id', 'message_list': 'activated_message_list', 'parent_message_id': 'parent_message_id', 'stream_handler': 'stream_handler', 'llm_name': 'llm_name', 'app_type': '"""webot"""'}), "(interaction_executor=\n interaction_executor, user_intent=user_intent, human_message_id=\n human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id\n =chat_id, message_list=activated_message_list, parent_message_id=\n parent_message_id, stream_handler=stream_handler, llm_name=llm_name,\n app_type='webot')\n", (13382, 13714), False, 'from backend.utils.streaming import single_round_chat_with_agent_streaming\n'), ((7547, 7557), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7552, 7557), False, 'from time import sleep\n'), ((10439, 10461), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10459, 10461), False, 'import traceback\n'), ((8392, 8477), 'json.dumps', 'json.dumps', (["{'action_history': action_history, 'last_page': last_page}"], {'indent': '(4)'}), "({'action_history': action_history, 'last_page': last_page}, indent=4\n )\n", (8402, 8477), False, 'import json\n'), ((8574, 8648), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (8584, 8648), False, 'import json\n'), ((9103, 9177), 'json.dumps', 
'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9113, 9177), False, 'import json\n'), ((9634, 9708), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9644, 9708), False, 'import json\n'), ((10154, 10228), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (10164, 10228), False, 'import json\n')]
from langchain.chains import LLMChain from langchain_core.prompts import PromptTemplate from langchain_openai import ChatOpenAI from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") llm_creative = ChatOpenAI(temperature=1, model_name="gpt-3.5-turbo") def get_summary_chain() -> LLMChain: summary_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 1. a short summary 2. two interesting facts about them \n{format_instructions} """ summary_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=summary_template, partial_variables={ "format_instructions": summary_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=summary_prompt_template) def get_interests_chain() -> LLMChain: interesting_facts_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 3 topics that might interest them \n{format_instructions} """ interesting_facts_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=interesting_facts_template, partial_variables={ "format_instructions": topics_of_interest_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=interesting_facts_prompt_template) def get_ice_breaker_chain() -> LLMChain: ice_breaker_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 2 creative Ice breakers with them that are derived from their activity on Linkedin and twitter, preferably on latest tweets \n{format_instructions} """ ice_breaker_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=ice_breaker_template, partial_variables={ "format_instructions": ice_breaker_parser.get_format_instructions() }, ) return LLMChain(llm=llm_creative, prompt=ice_breaker_prompt_template)
[ "langchain.chains.LLMChain", "langchain_openai.ChatOpenAI" ]
[((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=1, model_name='gpt-3.5-turbo')\n", (304, 347), False, 'from langchain_openai import ChatOpenAI\n'), ((933, 982), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'summary_prompt_template'}), '(llm=llm, prompt=summary_prompt_template)\n', (941, 982), False, 'from langchain.chains import LLMChain\n'), ((1580, 1639), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'interesting_facts_prompt_template'}), '(llm=llm, prompt=interesting_facts_prompt_template)\n', (1588, 1639), False, 'from langchain.chains import LLMChain\n'), ((2304, 2366), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_creative', 'prompt': 'ice_breaker_prompt_template'}), '(llm=llm_creative, prompt=ice_breaker_prompt_template)\n', (2312, 2366), False, 'from langchain.chains import LLMChain\n'), ((863, 903), 'output_parsers.summary_parser.get_format_instructions', 'summary_parser.get_format_instructions', ([], {}), '()\n', (901, 903), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((1499, 1550), 'output_parsers.topics_of_interest_parser.get_format_instructions', 'topics_of_interest_parser.get_format_instructions', ([], {}), '()\n', (1548, 1550), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((2230, 2274), 'output_parsers.ice_breaker_parser.get_format_instructions', 'ice_breaker_parser.get_format_instructions', ([], {}), '()\n', (2272, 2274), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n')]
import asyncio import uvicorn from typing import AsyncIterable, Awaitable from dotenv import load_dotenv from fastapi import FastAPI from fastapi.responses import FileResponse, StreamingResponse from langchain.callbacks import AsyncIteratorCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.schema import HumanMessage load_dotenv() async def wait_done(fn: Awaitable, event: asyncio.Event): try: await fn except Exception as e: print(e) event.set() finally: event.set() async def call_openai(question: str) -> AsyncIterable[str]: callback = AsyncIteratorCallbackHandler() model = ChatOpenAI(streaming=True, verbose=True, callbacks=[callback]) coroutine = wait_done(model.agenerate(messages=[[HumanMessage(content=question)]]), callback.done) task = asyncio.create_task(coroutine) async for token in callback.aiter(): yield f"{token}" await task app = FastAPI() @app.post("/ask") def ask(body: dict): return StreamingResponse(call_openai(body['question']), media_type="text/event-stream") @app.get("/") async def homepage(): return FileResponse('statics/index.html') if __name__ == "__main__": uvicorn.run(host="0.0.0.0", port=8888, app=app)
[ "langchain.callbacks.AsyncIteratorCallbackHandler", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((345, 358), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import load_dotenv\n'), ((959, 968), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (966, 968), False, 'from fastapi import FastAPI\n'), ((616, 646), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (644, 646), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((659, 721), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'callbacks': '[callback]'}), '(streaming=True, verbose=True, callbacks=[callback])\n', (669, 721), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 867), 'asyncio.create_task', 'asyncio.create_task', (['coroutine'], {}), '(coroutine)\n', (856, 867), False, 'import asyncio\n'), ((1149, 1183), 'fastapi.responses.FileResponse', 'FileResponse', (['"""statics/index.html"""'], {}), "('statics/index.html')\n", (1161, 1183), False, 'from fastapi.responses import FileResponse, StreamingResponse\n'), ((1216, 1263), 'uvicorn.run', 'uvicorn.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8888)', 'app': 'app'}), "(host='0.0.0.0', port=8888, app=app)\n", (1227, 1263), False, 'import uvicorn\n'), ((776, 806), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'question'}), '(content=question)\n', (788, 806), False, 'from langchain.schema import HumanMessage\n')]
""" Adapted from https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py """ import json import os from langchain.llms import OpenAI llm = OpenAI( model_name="qwen", temperature=0, openai_api_base="http://192.168.0.53:7891/v1", openai_api_key="xxx", ) # 将一个插件的关键信息拼接成一段文本的模版。 TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}""" # ReAct prompting 的 instruction 模版,将包含插件的详细信息。 PROMPT_REACT = """Answer the following questions as best you can. You have access to the following tools: {tools_text} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tools_name_text}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Final Answer: the final answer to the original input question Begin! Question: {query}""" def llm_with_plugin(prompt: str, history, list_of_plugin_info=()): """ Args: prompt: 用户的最新一个问题。 history: 用户与模型的对话历史,是一个 list, list 中的每个元素为 {"user": "用户输入", "bot": "模型输出"} 的一轮对话。 最新的一轮对话放 list 末尾。不包含最新一个问题。 list_of_plugin_info: 候选插件列表,是一个 list,list 中的每个元素为一个插件的关键信息。 比如 list_of_plugin_info = [plugin_info_0, plugin_info_1, plugin_info_2], 其中 plugin_info_0, plugin_info_1, plugin_info_2 这几个样例见本文档前文。 Returns: 模型对用户最新一个问题的回答。 """ chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, '')] # 需要让模型进行续写的初始文本 planning_prompt = build_input_text(chat_history, list_of_plugin_info) text = "" while True: output = text_completion(planning_prompt + text, stop_words=["Observation:", "Observation:\n"]) action, action_input, output = parse_latest_plugin_call(output) if action: # 需要调用插件 # action、action_input 分别为需要调用的插件代号、输入参数 # observation是插件返回的结果,为字符串 observation = call_plugin(action, action_input) output += f"\nObservation: {observation}\nThought:" text += output else: # 生成结束,并且不再需要调用插件 text += output break new_history = [] new_history.extend(history) new_history.append({"user": prompt, "bot": text}) return text, new_history def build_input_text(chat_history, list_of_plugin_info) -> str: """ 将对话历史、插件信息聚合成一段初始文本 """ tools_text = [] for plugin_info in list_of_plugin_info: tool = TOOL_DESC.format( name_for_model=plugin_info["name_for_model"], name_for_human=plugin_info["name_for_human"], description_for_model=plugin_info["description_for_model"], parameters=json.dumps(plugin_info["parameters"], ensure_ascii=False), ) if plugin_info.get("args_format", "json") == "json": tool += " Format the arguments as a JSON object." elif plugin_info['args_format'] == 'code': tool += " Enclose the code within triple backticks (`) at the beginning and end of the code." 
else: raise NotImplementedError tools_text.append(tool) tools_text = '\n\n'.join(tools_text) # 候选插件的代号 tools_name_text = ", ".join([plugin_info["name_for_model"] for plugin_info in list_of_plugin_info]) im_start = "<|im_start|>" im_end = "<|im_end|>" prompt = f"{im_start}system\nYou are a helpful assistant.{im_end}" for i, (query, response) in enumerate(chat_history): if list_of_plugin_info: # 如果有候选插件 # 倒数第一轮或倒数第二轮对话填入详细的插件信息,但具体什么位置填可以自行判断 if (len(chat_history) == 1) or (i == len(chat_history) - 2): query = PROMPT_REACT.format( tools_text=tools_text, tools_name_text=tools_name_text, query=query, ) query = query.lstrip("\n").rstrip() # 重要!若不 strip 会与训练时数据的构造方式产生差异。 response = response.lstrip("\n").rstrip() # 重要!若不 strip 会与训练时数据的构造方式产生差异。 # 使用续写模式(text completion)时,需要用如下格式区分用户和AI: prompt += f"\n{im_start}user\n{query}{im_end}" prompt += f"\n{im_start}assistant\n{response}{im_end}" assert prompt.endswith(f"\n{im_start}assistant\n{im_end}") prompt = prompt[: -len(f"{im_end}")] return prompt def text_completion(input_text: str, stop_words) -> str: # 作为一个文本续写模型来使用 im_end = "<|im_end|>" if im_end not in stop_words: stop_words = stop_words + [im_end] return llm(input_text, stop=stop_words) # 续写 input_text 的结果,不包含 input_text 的内容 def parse_latest_plugin_call(text): plugin_name, plugin_args = "", "" i = text.rfind("\nAction:") j = text.rfind("\nAction Input:") k = text.rfind("\nObservation:") if 0 <= i < j: # If the text has `Action` and `Action input`, if k < j: # but does not contain `Observation`, # then it is likely that `Observation` is ommited by the LLM, # because the output text may have discarded the stop word. text = text.rstrip() + "\nObservation:" # Add it back. k = text.rfind("\nObservation:") plugin_name = text[i + len("\nAction:"): j].strip() plugin_args = text[j + len("\nAction Input:"): k].strip() text = text[:k] return plugin_name, plugin_args, text def call_plugin(plugin_name: str, plugin_args: str) -> str: """ 请开发者自行完善这部分内容。这里的参考实现仅是 demo 用途,非生产用途 """ if plugin_name == "google_search": # 使用 SerpAPI 需要在这里填入您的 SERPAPI_API_KEY! os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY", default="") from langchain import SerpAPIWrapper return SerpAPIWrapper().run(json.loads(plugin_args)["search_query"]) elif plugin_name == "image_gen": import urllib.parse prompt = json.loads(plugin_args)["prompt"] prompt = urllib.parse.quote(prompt) return json.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False) else: raise NotImplementedError def test(): tools = [ { "name_for_human": "谷歌搜索", "name_for_model": "google_search", "description_for_model": "谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。", "parameters": [ { "name": "search_query", "description": "搜索关键词或短语", "required": True, "schema": {"type": "string"}, } ], }, { "name_for_human": "文生图", "name_for_model": "image_gen", "description_for_model": "文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL", "parameters": [ { "name": "prompt", "description": "英文关键词,描述了希望图像具有什么内容", "required": True, "schema": {"type": "string"}, } ], }, ] history = [] for query in ["你好", "谁是周杰伦", "他老婆是谁", "给我画个可爱的小猫吧,最好是黑猫"]: print(f"User's Query:\n{query}\n") response, history = llm_with_plugin(prompt=query, history=history, list_of_plugin_info=tools) print(f"Qwen's Response:\n{response}\n") if __name__ == "__main__": test() """如果执行成功,在终端下应当能看到如下输出: User's Query: 你好 Qwen's Response: Thought: 提供的工具对回答该问题帮助较小,我将不使用工具直接作答。 Final Answer: 你好!很高兴见到你。有什么我可以帮忙的吗? 
User's Query: 谁是周杰伦 Qwen's Response: Thought: 我应该使用Google搜索查找相关信息。 Action: google_search Action Input: {"search_query": "周杰伦"} Observation: Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman. Thought: I now know the final answer. Final Answer: 周杰伦(Jay Chou)是一位来自台湾的歌手、词曲创作人、音乐制作人、说唱歌手、演员、电视节目主持人和企业家。他以其独特的音乐风格和才华在华语乐坛享有很高的声誉。 User's Query: 他老婆是谁 Qwen's Response: Thought: 我应该使用Google搜索查找相关信息。 Action: google_search Action Input: {"search_query": "周杰伦 老婆"} Observation: Hannah Quinlivan Thought: I now know the final answer. Final Answer: 周杰伦的老婆是Hannah Quinlivan,她是一位澳大利亚籍的模特和演员。两人于2015年结婚,并育有一子。 User's Query: 给我画个可爱的小猫吧,最好是黑猫 Qwen's Response: Thought: 我应该使用文生图API来生成一张可爱的小猫图片。 Action: image_gen Action Input: {"prompt": "cute black cat"} Observation: {"image_url": "https://image.pollinations.ai/prompt/cute%20black%20cat"} Thought: I now know the final answer. Final Answer: 生成的可爱小猫图片的URL为https://image.pollinations.ai/prompt/cute%20black%20cat。你可以点击这个链接查看图片。 """
[ "langchain.SerpAPIWrapper", "langchain.llms.OpenAI" ]
[((153, 267), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""qwen"""', 'temperature': '(0)', 'openai_api_base': '"""http://192.168.0.53:7891/v1"""', 'openai_api_key': '"""xxx"""'}), "(model_name='qwen', temperature=0, openai_api_base=\n 'http://192.168.0.53:7891/v1', openai_api_key='xxx')\n", (159, 267), False, 'from langchain.llms import OpenAI\n'), ((5778, 5818), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {'default': '""""""'}), "('SERPAPI_API_KEY', default='')\n", (5787, 5818), False, 'import os\n'), ((6118, 6217), 'json.dumps', 'json.dumps', (["{'image_url': f'https://image.pollinations.ai/prompt/{prompt}'}"], {'ensure_ascii': '(False)'}), "({'image_url': f'https://image.pollinations.ai/prompt/{prompt}'},\n ensure_ascii=False)\n", (6128, 6217), False, 'import json\n'), ((2933, 2990), 'json.dumps', 'json.dumps', (["plugin_info['parameters']"], {'ensure_ascii': '(False)'}), "(plugin_info['parameters'], ensure_ascii=False)\n", (2943, 2990), False, 'import json\n'), ((5880, 5896), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (5894, 5896), False, 'from langchain import SerpAPIWrapper\n'), ((5901, 5924), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (5911, 5924), False, 'import json\n'), ((6025, 6048), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (6035, 6048), False, 'import json\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
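# A minimal usage sketch for the Cohere wrapper defined above, assuming the
# `cohere` package is installed and a real API key is substituted for the
# placeholder below; the model name and prompt are illustrative only.
from langchain.llms import Cohere

llm = Cohere(
    model="command",              # hypothetical model name
    temperature=0.3,
    max_tokens=128,
    cohere_api_key="my-api-key",   # placeholder; a real key is needed for actual calls
    stop=["\n\n"],                 # default stop sequences applied on every call
)

# _call() forwards the prompt plus _default_params to client.generate() via
# completion_with_retry, then strips the stop sequences with
# enforce_stop_tokens so the completion ends cleanly.
print(llm("Tell me a joke about tokenizers."))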
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
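# A minimal usage sketch for the GooseAI wrapper defined above, assuming the
# `openai` package is installed and a real GooseAI key replaces the placeholder
# environment value; the extra `repetition_penalty` argument is illustrative.
import os

from langchain.llms import GooseAI

os.environ.setdefault("GOOSEAI_API_KEY", "placeholder-key")  # real key required for actual calls

llm = GooseAI(
    model_name="gpt-neo-20b",
    temperature=0.5,
    max_tokens=64,
    repetition_penalty=1.2,  # not a declared field, so build_extra moves it to model_kwargs
)

# The pre root_validator logs a warning and stashes the unknown parameter,
# which _default_params then merges back in for the create() call.
assert llm.model_kwargs.get("repetition_penalty") == 1.2
print(llm("Finish this sentence: the quick brown fox"))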
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
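# A minimal usage sketch for the Anyscale wrapper defined above; the service
# URL, route and token values are placeholders, and construction itself issues
# a requests.get() against f"{url}/-/route", so a reachable service is
# required for this to run.
import os

from langchain.llms import Anyscale

os.environ["ANYSCALE_SERVICE_URL"] = "https://example-service.anyscale.test"  # placeholder
os.environ["ANYSCALE_SERVICE_ROUTE"] = "predict"                              # placeholder
os.environ["ANYSCALE_SERVICE_TOKEN"] = "service-token"                        # placeholder

llm = Anyscale()

# _call() POSTs {"prompt": ...} to f"{url}/{route}" with a bearer token,
# raises on any non-200 status, and optionally trims the text with
# enforce_stop_tokens when stop words are supplied.
print(llm("Tell me a joke.", stop=["\n"]))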
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
from langchain.prompts import PromptTemplate _symptom_extract_template = """Consider the following conversation patient note: Patient note: {note} Choose on of the symptoms to be the chief complaint (it is usually the first symptom mentioned). Provide your response strictly in the following format, replacing only the name_of_chief_complaint (keeping : yes), and refrain from including any additional text: <symptom> name_of_chief_complaint </symptom> """ _symptom_match_template = """Given the symptom: {symptom} which of the following retrievals is the best match? Retrievals: {retrievals} Select only one and write it below in the following format: <match> choice </match> Remember, do not include any other text, ensure your choice is in the provided retrievals, and follow the output format. """ CC_EXTRACT_PROMPT = PromptTemplate.from_template(_symptom_extract_template) CC_MATCH_PROMPT = PromptTemplate.from_template(_symptom_match_template)
[ "langchain.prompts.PromptTemplate.from_template" ]
[((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_match_template'], {}), '(_symptom_match_template)\n', (932, 957), False, 'from langchain.prompts import PromptTemplate\n')]
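# A short sketch of how the two templates above might be rendered, assuming
# CC_EXTRACT_PROMPT and CC_MATCH_PROMPT from the preceding snippet are in
# scope; the patient note and retrieval list are made-up examples.
extract_text = CC_EXTRACT_PROMPT.format(
    note="Patient reports a persistent dry cough and mild fever for three days."
)
match_text = CC_MATCH_PROMPT.format(
    symptom="dry cough",
    retrievals="- Cough\n- Fever\n- Sore throat",
)

# PromptTemplate.from_template infers the input variables from the braced
# placeholders, so the keyword names must match {note}, {symptom} and
# {retrievals}. Downstream parsing is expected to read the answer out of the
# <symptom>...</symptom> and <match>...</match> tags.
print(extract_text)
print(match_text)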
import requests from typing import Any, Dict, Optional from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT from langchain.chains import APIChain from langchain.prompts import BasePromptTemplate from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from .requests_l402 import RequestsL402Wrapper from .requests_l402 import ResponseTextWrapper from lightning import LightningNode class L402APIChain(APIChain): requests_wrapper: Any @classmethod def from_llm_and_api_docs( cls, llm: BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: BasePromptTemplate = API_URL_PROMPT, api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, lightning_node = None, **kwargs: Any, ) -> APIChain: """Load chain from just an LLM and the api docs.""" requests_L402 = RequestsL402Wrapper(lightning_node, requests) lang_chain_request_L402 = ResponseTextWrapper( requests_wrapper=requests_L402, ) get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=lang_chain_request_L402, api_docs=api_docs, **kwargs, )
[ "langchain.chains.llm.LLMChain" ]
[((1139, 1179), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (1147, 1179), False, 'from langchain.chains.llm import LLMChain\n'), ((1207, 1252), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (1215, 1252), False, 'from langchain.chains.llm import LLMChain\n')]
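# A hedged sketch of wiring up the L402APIChain factory defined above; the LLM
# choice, API documentation string and LightningNode handle are all
# placeholders, and a configured node plus OpenAI credentials would be needed
# to actually run it.
from langchain.llms import OpenAI

api_docs = """
GET /v1/prices/{asset} returns the latest price for the given asset.
"""  # made-up documentation for illustration

llm = OpenAI(temperature=0)
lightning_node = None  # replace with a configured lightning.LightningNode

chain = L402APIChain.from_llm_and_api_docs(
    llm=llm,
    api_docs=api_docs,
    lightning_node=lightning_node,
    verbose=True,
)

# run() drives the two LLMChains built above: one turns the question into a
# request URL, the L402-aware requests wrapper fetches (and, if needed, pays
# for) the endpoint, and the second chain summarises the HTTP response.
print(chain.run("What is the current price of BTC?"))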
"""Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.chains.vector_db_qa.base import VectorDBQA from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import load_prompt, load_prompt_from_config from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` 
must be present." ) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = 
load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." 
) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." 
) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." ) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
[ "langchain.chains.sql_database.base.SQLDatabaseChain", "langchain.prompts.loading.load_prompt_from_config", "langchain.chains.qa_with_sources.base.QAWithSourcesChain", "langchain.chains.pal.base.PALChain", "langchain.chains.combine_documents.refine.RefineDocumentsChain", "langchain.chains.llm.LLMChain", "langchain.chains.hyde.base.HypotheticalDocumentEmbedder", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.llm_checker.base.LLMCheckerChain", "langchain.llms.loading.load_llm_from_config", "langchain.chains.vector_db_qa.base.VectorDBQA", "langchain.chains.llm_bash.base.LLMBashChain", "langchain.utilities.loading.try_load_from_hub", "langchain.chains.llm_requests.LLMRequestsChain", "langchain.chains.api.base.APIChain", "langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain", "langchain.chains.llm_math.base.LLMMathChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain" ]
[((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')]
"""Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.chains.vector_db_qa.base import VectorDBQA from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import load_prompt, load_prompt_from_config from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` 
must be present." ) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = 
load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." 
) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." 
) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." ) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
[ "langchain.chains.sql_database.base.SQLDatabaseChain", "langchain.prompts.loading.load_prompt_from_config", "langchain.chains.qa_with_sources.base.QAWithSourcesChain", "langchain.chains.pal.base.PALChain", "langchain.chains.combine_documents.refine.RefineDocumentsChain", "langchain.chains.llm.LLMChain", "langchain.chains.hyde.base.HypotheticalDocumentEmbedder", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.llm_checker.base.LLMCheckerChain", "langchain.llms.loading.load_llm_from_config", "langchain.chains.vector_db_qa.base.VectorDBQA", "langchain.chains.llm_bash.base.LLMBashChain", "langchain.utilities.loading.try_load_from_hub", "langchain.chains.llm_requests.LLMRequestsChain", "langchain.chains.api.base.APIChain", "langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain", "langchain.chains.llm_math.base.LLMMathChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain" ]
[((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')]
"""Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.chains.vector_db_qa.base import VectorDBQA from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import load_prompt, load_prompt_from_config from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` 
must be present." ) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = 
load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." 
) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." 
) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." ) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
[ "langchain.chains.sql_database.base.SQLDatabaseChain", "langchain.prompts.loading.load_prompt_from_config", "langchain.chains.qa_with_sources.base.QAWithSourcesChain", "langchain.chains.pal.base.PALChain", "langchain.chains.combine_documents.refine.RefineDocumentsChain", "langchain.chains.llm.LLMChain", "langchain.chains.hyde.base.HypotheticalDocumentEmbedder", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.llm_checker.base.LLMCheckerChain", "langchain.llms.loading.load_llm_from_config", "langchain.chains.vector_db_qa.base.VectorDBQA", "langchain.chains.llm_bash.base.LLMBashChain", "langchain.utilities.loading.try_load_from_hub", "langchain.chains.llm_requests.LLMRequestsChain", "langchain.chains.api.base.APIChain", "langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain", "langchain.chains.llm_math.base.LLMMathChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain" ]
[((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')]
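The loading module in the record above exposes load_chain (for a LangChainHub path or a local JSON/YAML file) and load_chain_from_config (for an in-memory dict whose "_type" key selects a loader from type_to_loader_dict). The sketch below is a minimal, hedged usage example: the "llm_math_chain" type and the two function signatures come from the record itself, but the import path and the exact shape of the nested llm and prompt configs (handled by load_llm_from_config and load_prompt, which live outside this record) are assumptions, and the file names are hypothetical.

from langchain.chains.loading import load_chain, load_chain_from_config  # assumed import path

# Option 1: load from a local file; _load_chain_from_file accepts .json or .yaml.
chain = load_chain("llm_math_chain.yaml")  # hypothetical local config file

# Option 2: build the config dict directly. "_type" picks the loader from
# type_to_loader_dict; the remaining keys are consumed by _load_llm_math_chain.
config = {
    "_type": "llm_math_chain",
    "llm": {"_type": "openai", "temperature": 0.0},  # assumed format expected by load_llm_from_config
    "prompt_path": "llm_math_prompt.json",           # hypothetical prompt file resolved by load_prompt
}
chain = load_chain_from_config(config)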
"""Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.chains.vector_db_qa.base import VectorDBQA from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import load_prompt, load_prompt_from_config from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` 
must be present." ) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = 
load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." 
) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." 
) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." ) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
[ "langchain.chains.sql_database.base.SQLDatabaseChain", "langchain.prompts.loading.load_prompt_from_config", "langchain.chains.qa_with_sources.base.QAWithSourcesChain", "langchain.chains.pal.base.PALChain", "langchain.chains.combine_documents.refine.RefineDocumentsChain", "langchain.chains.llm.LLMChain", "langchain.chains.hyde.base.HypotheticalDocumentEmbedder", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.llm_checker.base.LLMCheckerChain", "langchain.llms.loading.load_llm_from_config", "langchain.chains.vector_db_qa.base.VectorDBQA", "langchain.chains.llm_bash.base.LLMBashChain", "langchain.utilities.loading.try_load_from_hub", "langchain.chains.llm_requests.LLMRequestsChain", "langchain.chains.api.base.APIChain", "langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain", "langchain.chains.llm_math.base.LLMMathChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain" ]
[((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')]
"""Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')]
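A minimal usage sketch for the endpoint wrapper above; the endpoint URL is a placeholder, `max_new_tokens` is an assumed optional generation parameter, and a valid `HUGGINGFACEHUB_API_TOKEN` is expected in the environment.

from langchain.llms import HuggingFaceEndpoint

hf = HuggingFaceEndpoint(
    endpoint_url="https://<your-endpoint>.endpoints.huggingface.cloud",  # placeholder URL
    task="text-generation",
    model_kwargs={"max_new_tokens": 64},  # assumed, optional
)
# stop sequences are enforced client-side via enforce_stop_tokens in _call()
print(hf("Tell me a joke.", stop=["\n\n"]))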
"""Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')]
"""Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')]
"""Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')]
import os
from langchain.llms.bedrock import Bedrock
from langchain import PromptTemplate

def get_llm():

    model_kwargs = {
        "maxTokenCount": 1024,
        "stopSequences": [],
        "temperature": 0,
        "topP": 0.9
    }

    llm = Bedrock(
        # credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
        region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
        endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
        model_id="amazon.titan-tg1-large", #use the Amazon Titan TG1 model
        model_kwargs=model_kwargs) #configure the properties for Titan.

    return llm


def get_prompt(user_input, template):

    prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template

    prompt = prompt_template.format(user_input=user_input)

    return prompt


def get_text_response(user_input, template): #text-to-text client function

    llm = get_llm()

    prompt = get_prompt(user_input, template)

    return llm.predict(prompt) #return a response to the prompt
[ "langchain.PromptTemplate.from_template" ]
[((844, 882), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (872, 882), False, 'from langchain import PromptTemplate\n'), ((437, 470), 'os.environ.get', 'os.environ.get', (['"""BWB_REGION_NAME"""'], {}), "('BWB_REGION_NAME')\n", (451, 470), False, 'import os\n'), ((536, 570), 'os.environ.get', 'os.environ.get', (['"""BWB_ENDPOINT_URL"""'], {}), "('BWB_ENDPOINT_URL')\n", (550, 570), False, 'import os\n')]
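A small illustrative call into the helpers above; the template text and question are made up, and AWS credentials/region are assumed to be configured (for example via the BWB_* environment variables).

# Hypothetical template and question; get_text_response() wires them through
# get_prompt() and the Titan model configured in get_llm().
template = "You are a concise assistant. Answer the request below:\n{user_input}"
response = get_text_response("Explain what Amazon Bedrock is in one sentence.", template)
print(response)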
from langchain import PromptTemplate, LLMChain from langchain.document_loaders import TextLoader from langchain.embeddings import LlamaCppEmbeddings from langchain.llms import GPT4All from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.callbacks.base import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.vectorstores.faiss import FAISS # SCRIPT INFO: # # This script allows you to create a vectorstore from a file and query it with a question (hard coded). # # It shows how you could send questions to a GPT4All custom knowledge base and receive answers. # # If you want a chat style interface using a similar custom knowledge base, you can use the custom_chatbot.py script provided. # Setup gpt4all_path = './models/gpt4all-converted.bin' llama_path = './models/ggml-model-q4_0.bin' callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) loader = TextLoader('./docs/shortened_sotu.txt') embeddings = LlamaCppEmbeddings(model_path=llama_path) llm = GPT4All(model=gpt4all_path, callback_manager=callback_manager, verbose=True) # Split text def split_chunks(sources): chunks = [] splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=32) for chunk in splitter.split_documents(sources): chunks.append(chunk) return chunks def create_index(chunks): texts = [doc.page_content for doc in chunks] metadatas = [doc.metadata for doc in chunks] search_index = FAISS.from_texts(texts, embeddings, metadatas=metadatas) return search_index def similarity_search(query, index): matched_docs = index.similarity_search(query, k=4) sources = [] for doc in matched_docs: sources.append( { "page_content": doc.page_content, "metadata": doc.metadata, } ) return matched_docs, sources # Create Index # docs = loader.load() # chunks = split_chunks(docs) # index = create_index(chunks) # Save Index (use this to save the index for later use) # Comment the line below after running once successfully (IMPORTANT) # index.save_local("state_of_the_union_index") # Load Index (use this to load the index from a file, eg on your second time running things and beyond) # Uncomment the line below after running once successfully (IMPORTANT) index = FAISS.load_local("./full_sotu_index", embeddings) # Set your query here manually question = "Summarize the comments about NATO and its purpose." matched_docs, sources = similarity_search(question, index) template = """ Please use the following context to answer questions. Context: {context} --- Question: {question} Answer: Let's think step by step.""" context = "\n".join([doc.page_content for doc in matched_docs]) prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context) llm_chain = LLMChain(prompt=prompt, llm=llm) print(llm_chain.run(question))
[ "langchain.llms.GPT4All", "langchain.LLMChain", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.document_loaders.TextLoader", "langchain.vectorstores.faiss.FAISS.load_local", "langchain.vectorstores.faiss.FAISS.from_texts", "langchain.embeddings.LlamaCppEmbeddings", "langchain.PromptTemplate" ]
[((968, 1007), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./docs/shortened_sotu.txt"""'], {}), "('./docs/shortened_sotu.txt')\n", (978, 1007), False, 'from langchain.document_loaders import TextLoader\n'), ((1021, 1062), 'langchain.embeddings.LlamaCppEmbeddings', 'LlamaCppEmbeddings', ([], {'model_path': 'llama_path'}), '(model_path=llama_path)\n', (1039, 1062), False, 'from langchain.embeddings import LlamaCppEmbeddings\n'), ((1069, 1145), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'gpt4all_path', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model=gpt4all_path, callback_manager=callback_manager, verbose=True)\n', (1076, 1145), False, 'from langchain.llms import GPT4All\n'), ((2399, 2448), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['"""./full_sotu_index"""', 'embeddings'], {}), "('./full_sotu_index', embeddings)\n", (2415, 2448), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2941, 2973), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (2949, 2973), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1219, 1283), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(32)'}), '(chunk_size=256, chunk_overlap=32)\n', (1249, 1283), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1529, 1585), 'langchain.vectorstores.faiss.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {'metadatas': 'metadatas'}), '(texts, embeddings, metadatas=metadatas)\n', (1545, 1585), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((924, 956), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (954, 956), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((2829, 2903), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (2843, 2903), False, 'from langchain import PromptTemplate, LLMChain\n')]
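On a first run, before a saved index exists, the commented-out steps above would be used roughly as follows; the index directory name is an assumption chosen to match the path passed to FAISS.load_local.

docs = loader.load()                    # load the source document
chunks = split_chunks(docs)             # split into 256-character chunks
index = create_index(chunks)           # embed and build the FAISS index
index.save_local("./full_sotu_index")  # persist for later FAISS.load_local calls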
import os
import requests

from langchain.tools import tool
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

from sec_api import QueryApi
from unstructured.partition.html import partition_html

class SECTools():
  @tool("Search 10-Q form")
  def search_10q(data):
    """
    Useful to search information from the latest 10-Q form for a given stock.
    The input to this tool should be a pipe (|) separated text of length two,
    representing the stock ticker you are interested in and what question you have about it.
    For example, `AAPL|what was last quarter's revenue`.
    """
    stock, ask = data.split("|")
    queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
    query = {
      "query": {
        "query_string": {
          "query": f"ticker:{stock} AND formType:\"10-Q\""
        }
      },
      "from": "0",
      "size": "1",
      "sort": [{ "filedAt": { "order": "desc" }}]
    }

    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock, check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
    answer = SECTools.__embedding_search(link, ask)
    return answer

  @tool("Search 10-K form")
  def search_10k(data):
    """
    Useful to search information from the latest 10-K form for a given stock.
    The input to this tool should be a pipe (|) separated text of length two,
    representing the stock ticker you are interested in and what question you have about it.
    For example, `AAPL|what was last year's revenue`.
    """
    stock, ask = data.split("|")
    queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
    query = {
      "query": {
        "query_string": {
          "query": f"ticker:{stock} AND formType:\"10-K\""
        }
      },
      "from": "0",
      "size": "1",
      "sort": [{ "filedAt": { "order": "desc" }}]
    }

    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock, check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
    answer = SECTools.__embedding_search(link, ask)
    return answer

  def __embedding_search(url, ask):
    text = SECTools.__download_form_html(url)
    elements = partition_html(text=text)
    content = "\n".join([str(el) for el in elements])
    text_splitter = CharacterTextSplitter(
        separator = "\n",
        chunk_size = 1000,
        chunk_overlap = 150,
        length_function = len,
        is_separator_regex = False,
    )
    docs = text_splitter.create_documents([content])
    retriever = FAISS.from_documents(
      docs,
      OpenAIEmbeddings()
    ).as_retriever()
    answers = retriever.get_relevant_documents(ask, top_k=4)
    answers = "\n\n".join([a.page_content for a in answers])
    return answers

  def __download_form_html(url):
    headers = {
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
      'Accept-Encoding': 'gzip, deflate, br',
      'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
      'Cache-Control': 'max-age=0',
      'Dnt': '1',
      'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
      'Sec-Ch-Ua-Mobile': '?0',
      'Sec-Ch-Ua-Platform': '"macOS"',
      'Sec-Fetch-Dest': 'document',
      'Sec-Fetch-Mode': 'navigate',
      'Sec-Fetch-Site': 'none',
      'Sec-Fetch-User': '?1',
      'Upgrade-Insecure-Requests': '1',
      'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.text_splitter.CharacterTextSplitter", "langchain.tools.tool" ]
[((327, 351), 'langchain.tools.tool', 'tool', (['"""Search 10-Q form"""'], {}), "('Search 10-Q form')\n", (331, 351), False, 'from langchain.tools import tool\n'), ((1325, 1349), 'langchain.tools.tool', 'tool', (['"""Search 10-K form"""'], {}), "('Search 10-K form')\n", (1329, 1349), False, 'from langchain.tools import tool\n'), ((748, 795), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (756, 795), False, 'from sec_api import QueryApi\n'), ((1742, 1789), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (1750, 1789), False, 'from sec_api import QueryApi\n'), ((2413, 2438), 'unstructured.partition.html.partition_html', 'partition_html', ([], {'text': 'text'}), '(text=text)\n', (2427, 2438), False, 'from unstructured.partition.html import partition_html\n'), ((2513, 2637), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(150)', 'length_function': 'len', 'is_separator_regex': '(False)'}), "(separator='\\n', chunk_size=1000, chunk_overlap=150,\n length_function=len, is_separator_regex=False)\n", (2534, 2637), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3814, 3848), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3826, 3848), False, 'import requests\n'), ((2795, 2813), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2811, 2813), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
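An illustrative direct invocation of the two tools above (they would normally be handed to an agent); it assumes SEC_API_API_KEY and OPENAI_API_KEY are set, and the ticker/questions are examples.

# The @tool decorator turns each method into a LangChain Tool, so .run() takes the
# pipe-separated input string described in the docstrings.
print(SECTools.search_10q.run("AAPL|what was last quarter's revenue"))
print(SECTools.search_10k.run("AAPL|what was last year's revenue"))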
# flake8: noqa from langchain.prompts.prompt import PromptTemplate _PROMPT_TEMPLATE = """You are GPT-3, and you can't do math. You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers. So we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we’ll take care of the rest: Question: ${{Question with hard calculation.}} ```python ${{Code that prints what you need to know}} ``` ```output ${{Output of your code}} ``` Answer: ${{Answer}} Otherwise, use this simpler format: Question: ${{Question without hard calculation}} Answer: ${{Answer}} Begin. Question: What is 37593 * 67? ```python print(37593 * 67) ``` ```output 2518731 ``` Answer: 2518731 Question: {question} """ PROMPT = PromptTemplate(input_variables=["question"], template=_PROMPT_TEMPLATE)
[ "langchain.prompts.prompt.PromptTemplate" ]
[((957, 1028), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '_PROMPT_TEMPLATE'}), "(input_variables=['question'], template=_PROMPT_TEMPLATE)\n", (971, 1028), False, 'from langchain.prompts.prompt import PromptTemplate\n')]
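A minimal sketch of wiring PROMPT into a chain; the OpenAI LLM is an assumption and any LangChain-compatible LLM would do.

from langchain.chains import LLMChain
from langchain.llms import OpenAI  # assumed LLM; requires OPENAI_API_KEY

chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT)
print(chain.run("What is 37593 * 67?"))  # fills the single 'question' input variable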
import streamlit as st
from langchain.prompts import PromptTemplate

chat_template = PromptTemplate(
    input_variables=['transcript','summary','chat_history','user_message', 'sentiment_report'],
    template='''
    You are an AI chatbot intended to discuss the user's audio transcription.
    \nTRANSCRIPT: "{transcript}"
    \nTRANSCRIPT SUMMARY: "{summary}"
    \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"
    \nCHAT HISTORY: {chat_history}
    \nUSER MESSAGE: "{user_message}"
    \nAI RESPONSE HERE:
    '''
)

sentiment_prompt = PromptTemplate(
    input_variables=['transcript','summary'],
    template='''
    Return a single word sentiment of either ['Positive','Negative' or 'Neutral'] from this transcript and summary.
    After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.
    \nTRANSCRIPT: {transcript}
    \nTRANSCRIPT SUMMARY: {summary}
    \nSENTIMENT LABEL HERE ('Positive','Negative', or 'Neutral') <comma-separated> REPORT HERE:
    '''
)

fact_check_prompt = '''
    Fact-check this transcript for factual or logical inaccuracies or inconsistencies
    \nWrite a report on the factuality / logic of the transcript
    \nTRANSCRIPT: {}
    \nTRANSCRIPT SUMMARY: {}
    \nAI FACT CHECK RESPONSE HERE:
    '''
[ "langchain.prompts.PromptTemplate" ]
[((88, 562), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary', 'chat_history', 'user_message', 'sentiment_report']", 'template': '"""\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\', \'chat_history\',\n \'user_message\', \'sentiment_report\'], template=\n """\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """\n )\n', (102, 562), False, 'from langchain.prompts import PromptTemplate\n'), ((595, 1095), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary']", 'template': '"""\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\'], template=\n """\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """\n )\n', (609, 1095), False, 'from langchain.prompts import PromptTemplate\n')]
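Illustrative formatting of the templates above; the transcript and summary strings are placeholders, and the label/report split assumes the model follows the comma-separated format requested in sentiment_prompt.

filled_sentiment = sentiment_prompt.format(transcript="<transcript text>", summary="<summary text>")
filled_fact_check = fact_check_prompt.format("<transcript text>", "<summary text>")
# After sending filled_sentiment to an LLM:
# label, report = llm_output.split(",", 1)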
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
from langchain.chains import SimpleSequentialChain

# Create a .env file in the root of your project and add your OpenAI API key to it

# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')

# This is an LLMChain to generate company names given a company description.
# Pass the key loaded from .env explicitly so the variable above is actually used.
llm = ChatOpenAI(openai_api_key=openai_api_key, temperature=0.7, model_name="gpt-3.5-turbo")

# Create templates
template_name = """You are a company name generator. Based on a company description, it is your job to create a company name.

Company description: {company_description}
Company name:"""

prompt_template_name = PromptTemplate(input_variables=["company_description"], template=template_name)

# This is an LLMChain to generate company slogans given a company name and company description.
template_slogan = """You are a company slogan generator. Based on a company name, it is your job to create a company slogan.

Company name: {company_name}
Company slogan:"""

prompt_template_slogan = PromptTemplate(input_variables=["company_name"], template=template_slogan)

# Create chains
name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
slogan_chain = LLMChain(llm=llm, prompt=prompt_template_slogan)

# This is the overall chain where we run these two chains in sequence.
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
slogan = overall_chain.run("We are a company that sells shoes.")
[ "langchain.chains.LLMChain", "langchain.chains.SimpleSequentialChain", "langchain.prompts.PromptTemplate", "langchain.chat_models.ChatOpenAI" ]
[((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((352, 384), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (366, 384), False, 'import os\n'), ((469, 524), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (479, 524), False, 'from langchain.chat_models import ChatOpenAI\n'), ((757, 836), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_description']", 'template': 'template_name'}), "(input_variables=['company_description'], template=template_name)\n", (771, 836), False, 'from langchain.prompts import PromptTemplate\n'), ((1137, 1211), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_name']", 'template': 'template_slogan'}), "(input_variables=['company_name'], template=template_slogan)\n", (1151, 1211), False, 'from langchain.prompts import PromptTemplate\n'), ((1242, 1288), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_name'}), '(llm=llm, prompt=prompt_template_name)\n', (1250, 1288), False, 'from langchain.chains import LLMChain\n'), ((1304, 1352), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_slogan'}), '(llm=llm, prompt=prompt_template_slogan)\n', (1312, 1352), False, 'from langchain.chains import LLMChain\n'), ((1441, 1511), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[name_chain, slogan_chain]', 'verbose': '(True)'}), '(chains=[name_chain, slogan_chain], verbose=True)\n', (1462, 1511), False, 'from langchain.chains import SimpleSequentialChain\n')]
import os import streamlit as st from PyPDF2 import PdfReader, PdfWriter from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.callbacks import get_openai_callback def ChatPDF(text): # st.write(text) #split into chunks text_splitter = CharacterTextSplitter( separator="\n", chunk_size = 1000, chunk_overlap = 200, length_function=len ) chunks = text_splitter.split_text(text) # st.write(chunks) # creating embeddings OPENAI_API_KEY = st.text_input("OPENAI API KEY", type = "password") if OPENAI_API_KEY: embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) # st.write("Embedding Created") # st.write(embeddings) knowledge_base = FAISS.from_texts(chunks, embeddings) st.write("Knowledge Base created ") #show user input def ask_question(i=0): user_question = st.text_input("Ask a question about your PDF?",key = i) if user_question: docs = knowledge_base.similarity_search(user_question) # st.write(docs) llm = OpenAI(openai_api_key=OPENAI_API_KEY) chain = load_qa_chain(llm, chain_type="stuff") with get_openai_callback() as cb: response = chain.run(input_documents=docs, question=user_question) print(cb) st.write(response) ask_question(i+1) ask_question() def main(): st.set_page_config(page_title="Ask ur PDF", page_icon="📄") hide_st_style = """ <style> #mainMenue {visibility: hidden;} footer {visibility: hidden;} #header {visibility: hidden;} </style> """ st.markdown(hide_st_style, unsafe_allow_html=True) # st.write(st.set_page_config) st.header("Ask your PDF 🤔💭") #uploading file pdf = st.file_uploader("Upload your PDF ", type="pdf") # extract the text if pdf is not None: option = st.selectbox("What you want to do with PDF📜", [ "Meta Data📂", "Extract Raw Text📄", "Extract Links🔗", "Extract Images🖼️", "Make PDF password protected🔐", "PDF Annotation📝", "ChatPDF💬" ]) pdf_reader = PdfReader(pdf) text = "" for page in pdf_reader.pages: text += page.extract_text() if option == "Meta Data📂": st.write(pdf_reader.metadata) elif option == "Make PDF password protected🔐": pswd = st.text_input("Enter yourpass word", type="password") if pswd: with st.spinner("Encrypting..."): pdf_writer = PdfWriter() for page_num in range(len(pdf_reader.pages)): pdf_writer.add_page(pdf_reader.pages[page_num]) pdf_writer.encrypt(pswd) with open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "wb") as f: pdf_writer.write(f) st.success("Encryption Successful!") st.download_button( label="Download Encrypted PDF", data=open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "rb").read(), file_name=f"{pdf.name.split('.')[0]}_encrypted.pdf", mime="application/octet-stream", ) try: os.remove(f"{pdf.name.split('.')[0]}_encrypted.pdf") except: pass elif option == "Extract Raw Text📄": st.write(text) elif option == "Extract Links🔗": for page in pdf_reader.pages: if "/Annots" in page: for annot in page["/Annots"]: subtype = annot.get_object()["/Subtype"] if subtype == "/Link": try: st.write(annot.get_object()["/A"]["/URI"]) except: pass elif option == "Extract Images🖼️": for page in pdf_reader.pages: try: for img in page.images: st.write(img.name) st.image(img.data) except: pass elif option == "PDF Annotation📝": for page in pdf_reader.pages: if "/Annots" in page: for annot in page["/Annots"]: obj = annot.get_object() st.write(obj) st.write("***********") annotation = {"subtype": obj["/Subtype"], "location": obj["/Rect"]} st.write(annotation) 
elif option == "ChatPDF💬": ChatPDF(text) if __name__ == "__main__": main()
[ "langchain.chains.question_answering.load_qa_chain", "langchain.text_splitter.CharacterTextSplitter", "langchain.llms.OpenAI", "langchain.callbacks.get_openai_callback", "langchain.vectorstores.FAISS.from_texts", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((481, 579), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (502, 579), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((745, 793), 'streamlit.text_input', 'st.text_input', (['"""OPENAI API KEY"""'], {'type': '"""password"""'}), "('OPENAI API KEY', type='password')\n", (758, 793), True, 'import streamlit as st\n'), ((1783, 1841), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Ask ur PDF"""', 'page_icon': '"""📄"""'}), "(page_title='Ask ur PDF', page_icon='📄')\n", (1801, 1841), True, 'import streamlit as st\n'), ((2081, 2131), 'streamlit.markdown', 'st.markdown', (['hide_st_style'], {'unsafe_allow_html': '(True)'}), '(hide_st_style, unsafe_allow_html=True)\n', (2092, 2131), True, 'import streamlit as st\n'), ((2175, 2203), 'streamlit.header', 'st.header', (['"""Ask your PDF 🤔💭"""'], {}), "('Ask your PDF 🤔💭')\n", (2184, 2203), True, 'import streamlit as st\n'), ((2242, 2290), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDF """'], {'type': '"""pdf"""'}), "('Upload your PDF ', type='pdf')\n", (2258, 2290), True, 'import streamlit as st\n'), ((842, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (858, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((989, 1025), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['chunks', 'embeddings'], {}), '(chunks, embeddings)\n', (1005, 1025), False, 'from langchain.vectorstores import FAISS\n'), ((1035, 1070), 'streamlit.write', 'st.write', (['"""Knowledge Base created """'], {}), "('Knowledge Base created ')\n", (1043, 1070), True, 'import streamlit as st\n'), ((2360, 2551), 'streamlit.selectbox', 'st.selectbox', (['"""What you want to do with PDF📜"""', "['Meta Data📂', 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬']"], {}), "('What you want to do with PDF📜', ['Meta Data📂',\n 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬'])\n", (2372, 2551), True, 'import streamlit as st\n'), ((2672, 2686), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (2681, 2686), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((1160, 1214), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF?"""'], {'key': 'i'}), "('Ask a question about your PDF?', key=i)\n", (1173, 1214), True, 'import streamlit as st\n'), ((2835, 2864), 'streamlit.write', 'st.write', (['pdf_reader.metadata'], {}), '(pdf_reader.metadata)\n', (2843, 2864), True, 'import streamlit as st\n'), ((1378, 1415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (1384, 1415), False, 'from langchain.llms import OpenAI\n'), ((1441, 1479), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (1454, 1479), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1667, 1685), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1675, 1685), True, 'import streamlit as st\n'), ((2941, 2994), 'streamlit.text_input', 
'st.text_input', (['"""Enter yourpass word"""'], {'type': '"""password"""'}), "('Enter yourpass word', type='password')\n", (2954, 2994), True, 'import streamlit as st\n'), ((1502, 1523), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1521, 1523), False, 'from langchain.callbacks import get_openai_callback\n'), ((4062, 4076), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (4070, 4076), True, 'import streamlit as st\n'), ((3039, 3066), 'streamlit.spinner', 'st.spinner', (['"""Encrypting..."""'], {}), "('Encrypting...')\n", (3049, 3066), True, 'import streamlit as st\n'), ((3102, 3113), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (3111, 3113), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((3480, 3516), 'streamlit.success', 'st.success', (['"""Encryption Successful!"""'], {}), "('Encryption Successful!')\n", (3490, 3516), True, 'import streamlit as st\n'), ((4697, 4715), 'streamlit.write', 'st.write', (['img.name'], {}), '(img.name)\n', (4705, 4715), True, 'import streamlit as st\n'), ((4741, 4759), 'streamlit.image', 'st.image', (['img.data'], {}), '(img.data)\n', (4749, 4759), True, 'import streamlit as st\n'), ((5041, 5054), 'streamlit.write', 'st.write', (['obj'], {}), '(obj)\n', (5049, 5054), True, 'import streamlit as st\n'), ((5080, 5103), 'streamlit.write', 'st.write', (['"""***********"""'], {}), "('***********')\n", (5088, 5103), True, 'import streamlit as st\n'), ((5222, 5242), 'streamlit.write', 'st.write', (['annotation'], {}), '(annotation)\n', (5230, 5242), True, 'import streamlit as st\n')]
"""Toolkit for the Wolfram Alpha API.""" from typing import List from langchain.tools.base import BaseTool, BaseToolkit from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper class WolframAlphaToolkit(BaseToolkit): """Tool that adds the capability to interact with Wolfram Alpha.""" wolfram_alpha_appid: str def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid) return [ WolframAlphaQueryRun( api_wrapper=wrapper, ) ]
[ "langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper", "langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun" ]
[((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')]
"""Toolkit for the Wolfram Alpha API.""" from typing import List from langchain.tools.base import BaseTool, BaseToolkit from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper class WolframAlphaToolkit(BaseToolkit): """Tool that adds the capability to interact with Wolfram Alpha.""" wolfram_alpha_appid: str def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid) return [ WolframAlphaQueryRun( api_wrapper=wrapper, ) ]
[ "langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper", "langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun" ]
[((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')]
"""The function tools tht are actually implemented""" import json import subprocess from langchain.agents.load_tools import load_tools from langchain.tools import BaseTool from langchain.utilities.bash import BashProcess from toolemu.tools.tool_interface import ( ArgException, ArgParameter, ArgReturn, FunctionTool, FunctionToolkit, ) from toolemu.utils.my_typing import * from .register import register_toolkit __ALL__ = ["RealTerminal", "RealPythonInterpreter", "RealWikipedia", "RealHuman"] class MyBashProcess(BashProcess): def _run(self, command: str) -> Tuple[str, int]: """ Runs a command in a subprocess and returns the output. Args: command: The command to run """ # noqa: E501 try: output = ( subprocess.run( command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) .stdout.decode() .strip() ) except subprocess.CalledProcessError as error: if self.return_err_output: return error.stdout.decode().strip(), error.returncode return str(error).strip(), error.returncode if self.strip_newlines: output = output.strip() return output, 0 #################### Terminal Interpreter #################### class RealTerminalExecute(FunctionTool): name = "TerminalExecute" summary = "Execute a terminal command and return the output. This command should follow proper syntax and be supported by the terminal environment." parameters: List[ArgParameter] = [ { "name": "command", "type": "string", "description": "The command to execute in the terminal.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "output", "type": "string", "description": "The output generated by the executed terminal command, including both standard output and standard error streams.", }, { "name": "exit_code", "type": "integer", "description": "The exit code returned by the executed command. A zero value indicates successful execution, while non-zero values indicate errors or exceptions.", }, ] exceptions: List[ArgException] = [ { "name": "InvalidRequestException", "description": "The 'command' parameter contains an invalid or malformed command, which results in a failed execution attempt.", } ] _tool: BaseTool = MyBashProcess(return_err_output=True) def parse_return(self, tool_output: Dict[str, Any]) -> str: return json.dumps({"output": tool_output[0], "exit_code": tool_output[1]}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["command"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["command"]) @register_toolkit() class RealTerminal(FunctionToolkit): name_for_human = "Terminal command executor" description_for_human = "Executes commands in a terminal." name_for_model = "Terminal" description_for_model = "Executes commands in a terminal on the user's local system. Use it to run valid terminal commands for tasks such as file management, system control, and more" tool_classes = [RealTerminalExecute] #################### Python Interpreter #################### class RealPythonInterpreterExecute(FunctionTool): name = "PythonInterpreterExecute" summary = "Execute a Python script." 
parameters: List[ArgParameter] = [ { "name": "script", "type": "string", "description": "The python script to execute.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "result", "type": "string", "description": "The printed output of the script.", } ] exceptions: List[ArgException] = [] _tool: BaseTool = load_tools(["python_repl"])[0] def parse_return(self, tool_output: str) -> str: return json.dumps({"result": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["script"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["script"]) @register_toolkit() class RealPythonInterpreter(FunctionToolkit): name_for_human = "Python interpreter" description_for_human = "A Python shell." name_for_model = "PythonInterpreter" description_for_model = "A Python shell. Use it to execute python scripts. If you want to see the output of a value, you should print it out with `print(...)`." tool_classes = [RealPythonInterpreterExecute] #################### Wikipedia #################### class RealWikipediaSearch(FunctionTool): name = "WikipediaSearch" summary = "Query the Wikipedia tool for a given query." parameters: List[ArgParameter] = [ { "name": "query", "type": "string", "description": "The query to search for.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "result", "type": "string", "description": "The summary of the Wikipedia article.", } ] exceptions: List[ArgException] = [] _tool: BaseTool = load_tools(["wikipedia"])[0] def parse_return(self, tool_output: str) -> str: return json.dumps({"result": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["query"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["query"]) @register_toolkit() class RealWikipedia(FunctionToolkit): name_for_human = "Wikipedia search tool" description_for_human = "Tool for searching through Wikipedia." name_for_model = "Wikipedia" description_for_model = "Tool for searching through Wikipedia. Use it whenever you need to provide accurate responses for general questions about people, places, companies, historical events, or other subjects." tool_classes = [RealWikipediaSearch] #################### Human #################### class RealHumanAssistanceQuery(FunctionTool): name = "HumanAssistanceQuery" summary = "Ask the human a specific question" parameters: List[ArgParameter] = [ { "name": "question", "type": "string", "description": "The question to ask.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "answer", "type": "string", "description": "The answer from the human.", } ] exceptions: List[ArgException] = [] def parse_return(self, tool_output: str) -> str: return json.dumps({"answer": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: print("\n" + tool_input["question"] + "\n") return input(tool_input["question"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return NotImplementedError("Human tool does not support async") @register_toolkit() class RealHuman(FunctionToolkit): name_for_human = "Human assistance" description_for_human = "Seek human assistance or guidance." name_for_model = "HumanAssistance" description_for_model = "Seek human assistance or guidance. Use it when expert human or user input is necessary, e.g., when you need some human knowledge, user permission, user-specific information." 
tool_classes = [RealHumanAssistanceQuery]
[ "langchain.agents.load_tools.load_tools" ]
[((2863, 2930), 'json.dumps', 'json.dumps', (["{'output': tool_output[0], 'exit_code': tool_output[1]}"], {}), "({'output': tool_output[0], 'exit_code': tool_output[1]})\n", (2873, 2930), False, 'import json\n'), ((4269, 4296), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['python_repl']"], {}), "(['python_repl'])\n", (4279, 4296), False, 'from langchain.agents.load_tools import load_tools\n'), ((4369, 4404), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (4379, 4404), False, 'import json\n'), ((5711, 5736), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['wikipedia']"], {}), "(['wikipedia'])\n", (5721, 5736), False, 'from langchain.agents.load_tools import load_tools\n'), ((5809, 5844), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (5819, 5844), False, 'import json\n'), ((7232, 7267), 'json.dumps', 'json.dumps', (["{'answer': tool_output}"], {}), "({'answer': tool_output})\n", (7242, 7267), False, 'import json\n'), ((824, 925), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (838, 925), False, 'import subprocess\n')]
from typing import List, Optional, Any, Dict from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env from pydantic import Extra, root_validator from sam.gpt.quora import PoeClient, PoeResponse # token = "KaEMfvDPEXoS115jzAFRRg%3D%3D" # prompt = "write a java function that prints the nth fibonacci number. provide example usage" # streaming_response = False # render_markdown = True # chat_mode = False class Poe(LLM): client: PoeClient model: Optional[str] = "gpt-3.5-turbo" custom_model: bool = False token: str @root_validator() def validate_environment(cls, values: Dict) -> Dict: token = get_from_dict_or_env( values, "token", "POE_COOKIE" ) values["client"] = PoeClient(token) return values class Config: extra = Extra.forbid @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" models = { 'sage': 'capybara', 'gpt-4': 'beaver', 'claude-v1.2': 'a2_2', 'claude-instant-v1.0': 'a2', 'gpt-3.5-turbo': 'chinchilla', } _model = models[self.model] if not self.custom_model else self.model return { "model": _model, "token": self.token, } @property def _identifying_params(self) -> Dict[str, Any]: return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: return "poe" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: params = self._default_params for chunk in self.client.send_message(params.model, prompt): pass response = PoeResponse( { 'id': chunk['messageId'], 'object': 'text_completion', 'created': chunk['creationTime'], 'model': params.model, 'choices': [ { 'text': chunk['text'], 'index': 0, 'logprobs': None, 'finish_reason': 'stop', } ], 'usage': { 'prompt_tokens': len(prompt), 'completion_tokens': len(chunk['text']), 'total_tokens': len(prompt) + len(chunk['text']), }, } ) text = response.completion.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 714), False, 'from langchain.utils import get_from_dict_or_env\n'), ((765, 781), 'sam.gpt.quora.PoeClient', 'PoeClient', (['token'], {}), '(token)\n', (774, 781), False, 'from sam.gpt.quora import PoeClient, PoeResponse\n')]
from __future__ import annotations from typing import List, Optional from pydantic import ValidationError from langchain.chains.llm import LLMChain from langchain.chat_models.base import BaseChatModel from langchain.experimental.autonomous_agents.autogpt.output_parser import ( AutoGPTOutputParser, BaseAutoGPTOutputParser, ) from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt from langchain.experimental.autonomous_agents.autogpt.prompt_generator import ( FINISH_NAME, ) from langchain.schema import ( AIMessage, BaseMessage, Document, HumanMessage, SystemMessage, ) from langchain.tools.base import BaseTool from langchain.tools.human.tool import HumanInputRun from langchain.vectorstores.base import VectorStoreRetriever class AutoGPT: """Agent class for interacting with Auto-GPT.""" def __init__( self, ai_name: str, memory: VectorStoreRetriever, chain: LLMChain, output_parser: BaseAutoGPTOutputParser, tools: List[BaseTool], feedback_tool: Optional[HumanInputRun] = None, ): self.ai_name = ai_name self.memory = memory self.full_message_history: List[BaseMessage] = [] self.next_action_count = 0 self.chain = chain self.output_parser = output_parser self.tools = tools self.feedback_tool = feedback_tool @classmethod def from_llm_and_tools( cls, ai_name: str, ai_role: str, memory: VectorStoreRetriever, tools: List[BaseTool], llm: BaseChatModel, human_in_the_loop: bool = False, output_parser: Optional[BaseAutoGPTOutputParser] = None, ) -> AutoGPT: prompt = AutoGPTPrompt( ai_name=ai_name, ai_role=ai_role, tools=tools, input_variables=["memory", "messages", "goals", "user_input"], token_counter=llm.get_num_tokens, ) human_feedback_tool = HumanInputRun() if human_in_the_loop else None chain = LLMChain(llm=llm, prompt=prompt) return cls( ai_name, memory, chain, output_parser or AutoGPTOutputParser(), tools, feedback_tool=human_feedback_tool, ) def run(self, goals: List[str]) -> str: user_input = ( "Determine which next command to use, " "and respond using the format specified above:" ) # Interaction Loop loop_count = 0 while True: # Discontinue if continuous limit is reached loop_count += 1 # Send message to AI, get response assistant_reply = self.chain.run( goals=goals, messages=self.full_message_history, memory=self.memory, user_input=user_input, ) # Print Assistant thoughts print(assistant_reply) self.full_message_history.append(HumanMessage(content=user_input)) self.full_message_history.append(AIMessage(content=assistant_reply)) # Get command name and arguments action = self.output_parser.parse(assistant_reply) tools = {t.name: t for t in self.tools} if action.name == FINISH_NAME: return action.args["response"] if action.name in tools: tool = tools[action.name] try: observation = tool.run(action.args) except ValidationError as e: observation = ( f"Validation Error in args: {str(e)}, args: {action.args}" ) except Exception as e: observation = ( f"Error: {str(e)}, {type(e).__name__}, args: {action.args}" ) result = f"Command {tool.name} returned: {observation}" elif action.name == "ERROR": result = f"Error: {action.args}. " else: result = ( f"Unknown command '{action.name}'. " f"Please refer to the 'COMMANDS' list for available " f"commands and only respond in the specified JSON format." 
) memory_to_add = ( f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " ) if self.feedback_tool is not None: feedback = f"\n{self.feedback_tool.run('Input: ')}" if feedback in {"q", "stop"}: print("EXITING") return "EXITING" memory_to_add += feedback self.memory.add_documents([Document(page_content=memory_to_add)]) self.full_message_history.append(SystemMessage(content=result))
[ "langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser", "langchain.tools.human.tool.HumanInputRun", "langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt", "langchain.schema.HumanMessage", "langchain.chains.llm.LLMChain", "langchain.schema.AIMessage", "langchain.schema.Document", "langchain.schema.SystemMessage" ]
[((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, tools=tools,\n input_variables=['memory', 'messages', 'goals', 'user_input'],\n token_counter=llm.get_num_tokens)\n", (1766, 1918), False, 'from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt\n'), ((2075, 2107), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2083, 2107), False, 'from langchain.chains.llm import LLMChain\n'), ((2012, 2027), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2025, 2027), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2217, 2238), 'langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser', 'AutoGPTOutputParser', ([], {}), '()\n', (2236, 2238), False, 'from langchain.experimental.autonomous_agents.autogpt.output_parser import AutoGPTOutputParser, BaseAutoGPTOutputParser\n'), ((3045, 3077), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (3057, 3077), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((3124, 3158), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'assistant_reply'}), '(content=assistant_reply)\n', (3133, 3158), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4895, 4924), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'result'}), '(content=result)\n', (4908, 4924), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4811, 4847), 'langchain.schema.Document', 'Document', ([], {'page_content': 'memory_to_add'}), '(page_content=memory_to_add)\n', (4819, 4847), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n')]
"""Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.pydantic_v1 import Extra from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import TextSplitter class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=stuff_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
[ "langchain.chains.ReduceDocumentsChain", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.docstore.document.Document", "langchain.chains.llm.LLMChain", "langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager" ]
[((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')]
"""Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.pydantic_v1 import Extra from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import TextSplitter class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=stuff_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
[ "langchain.chains.ReduceDocumentsChain", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.docstore.document.Document", "langchain.chains.llm.LLMChain", "langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager" ]
[((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')]
from typing import Any, Dict, List, Optional, Sequence from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra, root_validator from langchain.utils import get_from_dict_or_env class AlephAlpha(LLM): """Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. 
""" control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" # Client params aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" host: str = "https://api.aleph-alpha.com" """The hostname of the API host. The default one is "https://api.aleph-alpha.com")""" hosting: Optional[str] = None """Determines in which datacenters the request may be processed. You can either set the parameter to "aleph-alpha" or omit it (defaulting to None). Not setting this value, or setting it to None, gives us maximal flexibility in processing your request in our own datacenters and on servers hosted with other providers. Choose this option for maximal availability. Setting it to "aleph-alpha" allows us to only process the request in our own datacenters. Choose this option for maximal data privacy.""" request_timeout_seconds: int = 305 """Client timeout that will be set for HTTP requests in the `requests` library's API calls. Server will close all requests after 300 seconds with an internal server error.""" total_retries: int = 8 """The number of retries made in case requests fail with certain retryable status codes. If the last retry fails a corresponding exception is raised. Note, that between retries an exponential backoff is applied, starting with 0.5 s after the first retry and doubling for each retry made. So with the default setting of 8 retries a total wait time of 63.5 s is added between the retries.""" nice: bool = False """Setting this to True, will signal to the API that you intend to be nice to other users by de-prioritizing your request below concurrent ones.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: from aleph_alpha_client import Client values["client"] = Client( token=aleph_alpha_api_key, host=values["host"], hosting=values["hosting"], request_timeout_seconds=values["request_timeout_seconds"], total_retries=values["total_retries"], nice=values["nice"], ) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "aleph_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = aleph_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop params = {**params, **kwargs} request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text if __name__ == "__main__": aa = AlephAlpha() print(aa("How are you?"))
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env", "langchain.pydantic_v1.root_validator" ]
[((6229, 6245), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (6243, 6245), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((6411, 6485), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""aleph_alpha_api_key"""', '"""ALEPH_ALPHA_API_KEY"""'], {}), "(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY')\n", (6431, 6485), False, 'from langchain.utils import get_from_dict_or_env\n'), ((6603, 6812), 'aleph_alpha_client.Client', 'Client', ([], {'token': 'aleph_alpha_api_key', 'host': "values['host']", 'hosting': "values['hosting']", 'request_timeout_seconds': "values['request_timeout_seconds']", 'total_retries': "values['total_retries']", 'nice': "values['nice']"}), "(token=aleph_alpha_api_key, host=values['host'], hosting=values[\n 'hosting'], request_timeout_seconds=values['request_timeout_seconds'],\n total_retries=values['total_retries'], nice=values['nice'])\n", (6609, 6812), False, 'from aleph_alpha_client import Client\n'), ((11210, 11261), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (11229, 11261), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((10810, 10834), 'aleph_alpha_client.Prompt.from_text', 'Prompt.from_text', (['prompt'], {}), '(prompt)\n', (10826, 10834), False, 'from aleph_alpha_client import CompletionRequest, Prompt\n')]
from langchain.vectorstores import Chroma from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.llms import OpenAI from langchain.chains import VectorDBQA from langchain.document_loaders import TextLoader from typing import List from langchain.schema import Document import os os.environ['OPENAI_API_KEY'] = "your-api-key" class Genie: def __init__(self, file_path: str): self.file_path = file_path self.loader = TextLoader(self.file_path) self.documents = self.loader.load() self.texts = self.text_split(self.documents) self.vectordb = self.embeddings(self.texts) self.genie = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=self.vectordb) @staticmethod def text_split(documents: TextLoader): text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) return texts @staticmethod def embeddings(texts: List[Document]): embeddings = OpenAIEmbeddings() vectordb = Chroma.from_documents(texts, embeddings) return vectordb def ask(self, query: str): return self.genie.run(query) if __name__ == "__main__": genie = Genie("example.txt") print(genie.ask("What is the weather like?"))
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.TextLoader", "langchain.llms.OpenAI", "langchain.vectorstores.Chroma.from_documents", "langchain.embeddings.OpenAIEmbeddings" ]
[((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (916, 950), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1112, 1130), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1128, 1130), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1150, 1190), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1171, 1190), False, 'from langchain.vectorstores import Chroma\n'), ((743, 751), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (749, 751), False, 'from langchain.llms import OpenAI\n')]
import logging from pathlib import Path from typing import List, Optional, Tuple from dotenv import load_dotenv load_dotenv() from queue import Empty, Queue from threading import Thread import gradio as gr from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.prompts import HumanMessagePromptTemplate from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage from callback import QueueCallback MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"] DEFAULT_TEMPERATURE = 0.7 ChatHistory = List[str] logging.basicConfig( format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO ) # load up our system prompt default_system_prompt = Path("prompts/system.prompt").read_text() # for the human, we will just inject the text human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}") def on_message_button_click( chat: Optional[ChatOpenAI], message: str, chatbot_messages: ChatHistory, messages: List[BaseMessage], ) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]: if chat is None: # in the queue we will store our streamed tokens queue = Queue() # let's create our default chat chat = ChatOpenAI( model_name=MODELS_NAMES[0], temperature=DEFAULT_TEMPERATURE, streaming=True, callbacks=([QueueCallback(queue)]), ) else: # hacky way to get the queue back queue = chat.callbacks[0].queue job_done = object() logging.info(f"Asking question to GPT, messages={messages}") # let's add the messages to our stuff messages.append(HumanMessage(content=message)) chatbot_messages.append((message, "")) # this is a little wrapper we need cuz we have to add the job_done def task(): chat(messages) queue.put(job_done) # now let's start a thread and run the generation inside it t = Thread(target=task) t.start() # this will hold the content as we generate content = "" # now, we read the next_token from queue and do what it has to be done while True: try: next_token = queue.get(True, timeout=1) if next_token is job_done: break content += next_token chatbot_messages[-1] = (message, content) yield chat, "", chatbot_messages, messages except Empty: continue # finally we can add our reply to messsages messages.append(AIMessage(content=content)) logging.debug(f"reply = {content}") logging.info(f"Done!") return chat, "", chatbot_messages, messages def system_prompt_handler(value: str) -> str: return value def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]: return "", [], [SystemMessage(content=system_prompt)] def on_apply_settings_button_click( system_prompt: str, model_name: str, temperature: float ): logging.info( f"Applying settings: model_name={model_name}, temperature={temperature}" ) chat = ChatOpenAI( model_name=model_name, temperature=temperature, streaming=True, callbacks=[QueueCallback(Queue())], ) # don't forget to nuke our queue chat.callbacks[0].queue.empty() return chat, *on_clear_button_click(system_prompt) # some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py with gr.Blocks( css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;} #chatbot {height: 400px; overflow: auto;}""" ) as demo: system_prompt = gr.State(default_system_prompt) # here we keep our state so multiple user can use the app at the same time! 
messages = gr.State([SystemMessage(content=default_system_prompt)]) # same thing for the chat, we want one chat per use so callbacks are unique I guess chat = gr.State(None) with gr.Column(elem_id="col_container"): gr.Markdown("# Welcome to GradioGPT! 🌟🚀") gr.Markdown( "An easy to use template. It comes with state and settings managment" ) with gr.Column(): system_prompt_area = gr.TextArea( default_system_prompt, lines=4, label="system prompt", interactive=True ) # we store the value into the state to avoid re rendering of the area system_prompt_area.input( system_prompt_handler, inputs=[system_prompt_area], outputs=[system_prompt], ) system_prompt_button = gr.Button("Set") chatbot = gr.Chatbot() with gr.Column(): message = gr.Textbox(label="chat input") message.submit( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], queue=True, ) message_button = gr.Button("Submit", variant="primary") message_button.click( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], ) with gr.Row(): with gr.Column(): clear_button = gr.Button("Clear") clear_button.click( on_clear_button_click, [system_prompt], [message, chatbot, messages], queue=False, ) with gr.Accordion("Settings", open=False): model_name = gr.Dropdown( choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model" ) temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="temperature", interactive=True, ) apply_settings_button = gr.Button("Apply") apply_settings_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) system_prompt_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) demo.queue() demo.launch()
[ "langchain.schema.AIMessage", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), '(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')]
""" View stage example selector. | Copyright 2017-2023, Voxel51, Inc. | `voxel51.com <https://voxel51.com/>`_ | """ import os import pickle from langchain.prompts import FewShotPromptTemplate, PromptTemplate import numpy as np import pandas as pd from scipy.spatial.distance import cosine # pylint: disable=relative-beyond-top-level from .utils import get_embedding_function, get_cache, hash_query ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples") EXAMPLE_EMBEDDINGS_PATH = os.path.join( EXAMPLES_DIR, "viewstage_embeddings.pkl" ) VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv") VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ) def get_or_create_embeddings(queries): if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH): with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f: example_embeddings = pickle.load(f) else: example_embeddings = {} query_hashes = [] new_hashes = [] new_queries = [] for query in queries: key = hash_query(query) query_hashes.append(key) if key not in example_embeddings: new_hashes.append(key) new_queries.append(query) if new_queries: print("Generating %d embeddings..." % len(new_queries)) model = get_embedding_function() new_embeddings = model(new_queries) for key, embedding in zip(new_hashes, new_embeddings): example_embeddings[key] = embedding if new_queries: print("Saving embeddings to disk...") with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f: pickle.dump(example_embeddings, f) return example_embeddings def has_geo_field(sample_collection): types = list(sample_collection.get_field_schema(flat=True).values()) types = [type(t) for t in types] return any(["Geo" in t.__name__ for t in types]) def get_label_type(sample_collection, field_name): sample = sample_collection.first() field = sample.get_field(field_name) field_type = str(type(field).__name__).lower() field_type = field_type[:-1] if field_type.endswith("s") else field_type return field_type def _replace_run_keys(prompt, runs): if "text_similarity" in runs: prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"]) if "image_similarity" in runs: prompt = prompt.replace( "IMAGE_SIM_KEY", runs["image_similarity"]["key"] ) if "evaluation" in runs: prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"]) if "uniqueness" in runs: prompt = prompt.replace( "UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"] ) return prompt def _count_empty_class_names(label_field): return [list(class_name.values())[0] for class_name in label_field].count( [] ) def _reduce_label_fields(label_fields): label_field_keys = list(label_fields.keys()) if len(label_field_keys) == 0: return None, None elif len(label_field_keys) > 0: empty_counts = [ _count_empty_class_names(label_fields[key]) for key in label_field_keys ] min_empty_count = min(empty_counts) valid_keys = [ key for key, count in zip(label_field_keys, empty_counts) if count == min_empty_count ] return {key: label_fields[key] for key in valid_keys}, min_empty_count def _parse_runs_and_labels(runs, label_fields): reduced_label_fields, count = _reduce_label_fields(label_fields.copy()) reduced_runs = runs.copy() if count is not None and count > 0 and "text_similarity" in reduced_runs: reduced_label_fields = None return reduced_runs, reduced_label_fields def _get_evaluation_type(sample_collection, eval_key): eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls if "openimages" in eval_cls: 
return "detection" elif "coco" in eval_cls: return "detection" elif "activitynet" in eval_cls: return "detection" elif "classification" in eval_cls: return "classification" return None def _load_examples(): examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip") examples["meta"] = examples["metadata"] examples["contains_match"] = examples["stages"].str.contains("match\(") examples["contains_filter_labels"] = examples["stages"].str.contains( "filter_labels\(" ) examples["mfl"] = ( examples["contains_match"] | examples["contains_filter_labels"] ) examples["hash"] = examples["query"].apply(lambda x: hash_query(x)) queries = examples["query"].tolist() embeddings = get_or_create_embeddings(queries) embeddings = { key: np.array(embeddings[key]) for key in examples["hash"].tolist() } return examples, embeddings def get_examples(): cache = get_cache() keys = ("viewstage_examples", "viewstage_embeddings") if keys[0] not in cache or keys[1] not in cache: cache[keys[0]], cache[keys[1]] = _load_examples() return cache[keys[0]], cache[keys[1]] def _get_filtered_examples(sample_collection, runs, label_fields): examples, embeddings = get_examples() media_type = sample_collection.media_type _filter = examples["media_type"].isin([media_type, "all"]) red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields) geo = has_geo_field(sample_collection) text_sim = "text_similarity" in red_runs image_sim = "image_similarity" in red_runs meta = "metadata" in red_runs eval = "evaluation" in red_runs if red_label_fields or eval: if red_label_fields: label_field_types = list( set( [ get_label_type(sample_collection, field) for field in red_label_fields ] ) ) else: label_field_types = [] if eval: eval_key = red_runs["evaluation"]["key"] eval_types = [_get_evaluation_type(sample_collection, eval_key)] else: eval_types = [] label_types = list(set(label_field_types + eval_types + ["all"])) _filter = _filter & examples["label_type"].isin(label_types) ## contains match() or filter_labels() in stages mfl_cond = red_label_fields and not text_sim conds = [geo, text_sim, image_sim, meta, eval, mfl_cond] strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"] for cond, cond_str in zip(conds, strs): if not cond: _filter = _filter & (examples[cond_str] == False) filtered_examples = examples[_filter] filtered_queries, filtered_stages, hashes = ( filtered_examples["query"].tolist(), filtered_examples["stages"].tolist(), filtered_examples["hash"].tolist(), ) filtered_embeddings = [embeddings[key] for key in hashes] return filtered_queries, filtered_stages, filtered_embeddings def get_similar_examples(sample_collection, query, runs, label_fields): ex_queries, ex_stages, ex_embeddings = _get_filtered_examples( sample_collection, runs, label_fields ) model = get_embedding_function() query_embedding = np.array(model([query])) if len(query_embedding.shape) == 2: query_embedding = query_embedding[0] dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings]) sorted_ix = np.argsort(dists).astype(int) k = 20 similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]] similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]] return [ {"input": sq, "output": ss} for sq, ss in zip(similar_queries, similar_stages) ] def generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ): examples = get_similar_examples( sample_collection, query, runs, label_fields ) example_prompt = VIEW_STAGE_EXAMPLE_PROMPT return FewShotPromptTemplate( examples=examples, 
example_prompt=example_prompt, prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n", suffix="Input: {text}\nOutput:", input_variables=["text"], ) def generate_view_stage_examples_prompt( sample_collection, query, runs, label_fields ): similar_examples_prompt_template = ( generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ) ) prompt = similar_examples_prompt_template.format(text=query) return _replace_run_keys(prompt, runs)
[ "langchain.prompts.FewShotPromptTemplate", "langchain.prompts.PromptTemplate" ]
[((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')]
""" View stage example selector. | Copyright 2017-2023, Voxel51, Inc. | `voxel51.com <https://voxel51.com/>`_ | """ import os import pickle from langchain.prompts import FewShotPromptTemplate, PromptTemplate import numpy as np import pandas as pd from scipy.spatial.distance import cosine # pylint: disable=relative-beyond-top-level from .utils import get_embedding_function, get_cache, hash_query ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples") EXAMPLE_EMBEDDINGS_PATH = os.path.join( EXAMPLES_DIR, "viewstage_embeddings.pkl" ) VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv") VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ) def get_or_create_embeddings(queries): if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH): with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f: example_embeddings = pickle.load(f) else: example_embeddings = {} query_hashes = [] new_hashes = [] new_queries = [] for query in queries: key = hash_query(query) query_hashes.append(key) if key not in example_embeddings: new_hashes.append(key) new_queries.append(query) if new_queries: print("Generating %d embeddings..." % len(new_queries)) model = get_embedding_function() new_embeddings = model(new_queries) for key, embedding in zip(new_hashes, new_embeddings): example_embeddings[key] = embedding if new_queries: print("Saving embeddings to disk...") with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f: pickle.dump(example_embeddings, f) return example_embeddings def has_geo_field(sample_collection): types = list(sample_collection.get_field_schema(flat=True).values()) types = [type(t) for t in types] return any(["Geo" in t.__name__ for t in types]) def get_label_type(sample_collection, field_name): sample = sample_collection.first() field = sample.get_field(field_name) field_type = str(type(field).__name__).lower() field_type = field_type[:-1] if field_type.endswith("s") else field_type return field_type def _replace_run_keys(prompt, runs): if "text_similarity" in runs: prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"]) if "image_similarity" in runs: prompt = prompt.replace( "IMAGE_SIM_KEY", runs["image_similarity"]["key"] ) if "evaluation" in runs: prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"]) if "uniqueness" in runs: prompt = prompt.replace( "UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"] ) return prompt def _count_empty_class_names(label_field): return [list(class_name.values())[0] for class_name in label_field].count( [] ) def _reduce_label_fields(label_fields): label_field_keys = list(label_fields.keys()) if len(label_field_keys) == 0: return None, None elif len(label_field_keys) > 0: empty_counts = [ _count_empty_class_names(label_fields[key]) for key in label_field_keys ] min_empty_count = min(empty_counts) valid_keys = [ key for key, count in zip(label_field_keys, empty_counts) if count == min_empty_count ] return {key: label_fields[key] for key in valid_keys}, min_empty_count def _parse_runs_and_labels(runs, label_fields): reduced_label_fields, count = _reduce_label_fields(label_fields.copy()) reduced_runs = runs.copy() if count is not None and count > 0 and "text_similarity" in reduced_runs: reduced_label_fields = None return reduced_runs, reduced_label_fields def _get_evaluation_type(sample_collection, eval_key): eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls if "openimages" in eval_cls: 
return "detection" elif "coco" in eval_cls: return "detection" elif "activitynet" in eval_cls: return "detection" elif "classification" in eval_cls: return "classification" return None def _load_examples(): examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip") examples["meta"] = examples["metadata"] examples["contains_match"] = examples["stages"].str.contains("match\(") examples["contains_filter_labels"] = examples["stages"].str.contains( "filter_labels\(" ) examples["mfl"] = ( examples["contains_match"] | examples["contains_filter_labels"] ) examples["hash"] = examples["query"].apply(lambda x: hash_query(x)) queries = examples["query"].tolist() embeddings = get_or_create_embeddings(queries) embeddings = { key: np.array(embeddings[key]) for key in examples["hash"].tolist() } return examples, embeddings def get_examples(): cache = get_cache() keys = ("viewstage_examples", "viewstage_embeddings") if keys[0] not in cache or keys[1] not in cache: cache[keys[0]], cache[keys[1]] = _load_examples() return cache[keys[0]], cache[keys[1]] def _get_filtered_examples(sample_collection, runs, label_fields): examples, embeddings = get_examples() media_type = sample_collection.media_type _filter = examples["media_type"].isin([media_type, "all"]) red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields) geo = has_geo_field(sample_collection) text_sim = "text_similarity" in red_runs image_sim = "image_similarity" in red_runs meta = "metadata" in red_runs eval = "evaluation" in red_runs if red_label_fields or eval: if red_label_fields: label_field_types = list( set( [ get_label_type(sample_collection, field) for field in red_label_fields ] ) ) else: label_field_types = [] if eval: eval_key = red_runs["evaluation"]["key"] eval_types = [_get_evaluation_type(sample_collection, eval_key)] else: eval_types = [] label_types = list(set(label_field_types + eval_types + ["all"])) _filter = _filter & examples["label_type"].isin(label_types) ## contains match() or filter_labels() in stages mfl_cond = red_label_fields and not text_sim conds = [geo, text_sim, image_sim, meta, eval, mfl_cond] strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"] for cond, cond_str in zip(conds, strs): if not cond: _filter = _filter & (examples[cond_str] == False) filtered_examples = examples[_filter] filtered_queries, filtered_stages, hashes = ( filtered_examples["query"].tolist(), filtered_examples["stages"].tolist(), filtered_examples["hash"].tolist(), ) filtered_embeddings = [embeddings[key] for key in hashes] return filtered_queries, filtered_stages, filtered_embeddings def get_similar_examples(sample_collection, query, runs, label_fields): ex_queries, ex_stages, ex_embeddings = _get_filtered_examples( sample_collection, runs, label_fields ) model = get_embedding_function() query_embedding = np.array(model([query])) if len(query_embedding.shape) == 2: query_embedding = query_embedding[0] dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings]) sorted_ix = np.argsort(dists).astype(int) k = 20 similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]] similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]] return [ {"input": sq, "output": ss} for sq, ss in zip(similar_queries, similar_stages) ] def generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ): examples = get_similar_examples( sample_collection, query, runs, label_fields ) example_prompt = VIEW_STAGE_EXAMPLE_PROMPT return FewShotPromptTemplate( examples=examples, 
example_prompt=example_prompt, prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n", suffix="Input: {text}\nOutput:", input_variables=["text"], ) def generate_view_stage_examples_prompt( sample_collection, query, runs, label_fields ): similar_examples_prompt_template = ( generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ) ) prompt = similar_examples_prompt_template.format(text=query) return _replace_run_keys(prompt, runs)
[ "langchain.prompts.FewShotPromptTemplate", "langchain.prompts.PromptTemplate" ]
[((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')]
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type

from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body


class Resource(str, Enum):
    """Enumerator of Resources to search."""

    THREADS = "threads"
    MESSAGES = "messages"


class SearchArgsSchema(BaseModel):
    """Input for SearchGmailTool."""

    # From https://support.google.com/mail/answer/7190?hl=en
    query: str = Field(
        ...,
        description="The Gmail query. Example filters include from:sender,"
        " to:recipient, subject:subject, -filtered_term,"
        " in:folder, is:important|read|starred, after:year/mo/date, "
        "before:year/mo/date, label:label_name"
        ' "exact phrase".'
        " Search newer/older than using d (day), m (month), and y (year): "
        "newer_than:2d, older_than:1y."
        " Attachments with extension example: filename:pdf. Multiple term"
        " matching example: from:amy OR from:david.",
    )
    resource: Resource = Field(
        default=Resource.MESSAGES,
        description="Whether to search for threads or messages.",
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )


class GmailSearch(GmailBaseTool):
    """Tool that searches for messages or threads in Gmail."""

    name: str = "search_gmail"
    description: str = (
        "Use this tool to search for email messages or threads."
        " The input must be a valid Gmail query."
        " The output is a JSON list of the requested resource."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Add the thread message snippets to the thread results
        results = []
        for thread in threads:
            thread_id = thread["id"]
            thread_data = (
                self.api_resource.users()
                .threads()
                .get(userId="me", id=thread_id)
                .execute()
            )
            messages = thread_data["messages"]
            thread["messages"] = []
            for message in messages:
                snippet = message["snippet"]
                thread["messages"].append({"snippet": snippet, "id": message["id"]})
            results.append(thread)

        return results

    def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        results = []
        for message in messages:
            message_id = message["id"]
            message_data = (
                self.api_resource.users()
                .messages()
                .get(userId="me", format="raw", id=message_id)
                .execute()
            )

            raw_message = base64.urlsafe_b64decode(message_data["raw"])

            email_msg = email.message_from_bytes(raw_message)

            subject = email_msg["Subject"]
            sender = email_msg["From"]

            message_body = email_msg.get_payload()

            body = clean_email_body(message_body)

            results.append(
                {
                    "id": message["id"],
                    "threadId": message_data["threadId"],
                    "snippet": message_data["snippet"],
                    "body": body,
                    "subject": subject,
                    "sender": sender,
                }
            )
        return results

    def _run(
        self,
        query: str,
        resource: Resource = Resource.MESSAGES,
        max_results: int = 10,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        """Run the tool."""
        results = (
            self.api_resource.users()
            .messages()
            .list(userId="me", q=query, maxResults=max_results)
            .execute()
            .get(resource.value, [])
        )
        if resource == Resource.THREADS:
            return self._parse_threads(results)
        elif resource == Resource.MESSAGES:
            return self._parse_messages(results)
        else:
            raise NotImplementedError(f"Resource of type {resource} not implemented.")
[ "langchain.pydantic_v1.Field", "langchain.tools.gmail.utils.clean_email_body" ]
[((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')]
from langchain import PromptTemplate

from codedog.templates import grimoire_en

TRANSLATE_PROMPT = PromptTemplate(
    template=grimoire_en.TRANSLATE_PR_REVIEW,
    input_variables=["language", "description", "content"]
)
[ "langchain.PromptTemplate" ]
[((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain import PromptTemplate\n')]
from typing import Any, Dict, List, Union

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage, get_buffer_string


class ConversationBufferWindowMemory(BaseChatMemory):
    """Buffer for storing conversation memory inside a limited size window."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:
    k: int = 5
    """Number of messages to store in buffer."""

    @property
    def buffer(self) -> Union[str, List[BaseMessage]]:
        """String buffer of memory."""
        return self.buffer_as_messages if self.return_messages else self.buffer_as_str

    @property
    def buffer_as_str(self) -> str:
        """Exposes the buffer as a string in case return_messages is True."""
        messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
        return get_buffer_string(
            messages,
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )

    @property
    def buffer_as_messages(self) -> List[BaseMessage]:
        """Exposes the buffer as a list of messages in case return_messages is False."""
        return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}
[ "langchain.schema.messages.get_buffer_string" ]
[((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n')]
from typing import Any, Dict, Optional, Type  # type: ignore

import langchain
from langchain import LLMChain, PromptTemplate
from langchain.experimental.autonomous_agents import AutoGPT

from sam.core.utils import logger


class AutoGptAgent:
    agent: AutoGPT

    def __init__(
        self,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        llm: BaseChatModel,
        tools: List[BaseTool],
        **kwargs
    ):
        self.agent = AutoGPT.from_llm_and_tools(
            ai_name=ai_name,
            ai_role=ai_role,
            llm=llm,
            memory=memory,
            tools=tools,
        )

    def start(self, goals: List[str]):
        return self.agent.run(goals=goals)
[ "langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools" ]
[((434, 535), 'langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools', 'AutoGPT.from_llm_and_tools', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'llm': 'llm', 'memory': 'memory', 'tools': 'tools'}), '(ai_name=ai_name, ai_role=ai_role, llm=llm,\n memory=memory, tools=tools)\n', (460, 535), False, 'from langchain.experimental.autonomous_agents import AutoGPT\n')]
#import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file

import warnings
warnings.filterwarnings("ignore")

from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
#from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI

import langchain

llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math", "wikipedia"], llm=llm)

customer_list = [["Harrison", "Chase"],
                 ["Lang", "Chain"],
                 ["Dolly", "Too"],
                 ["Elle", "Elem"],
                 ["Geoff", "Fusion"],
                 ["Trance", "Former"],
                 ["Jen", "Ayai"]]


def do_answer1():
    langchain.debug = True

    agent = create_python_agent(
        llm,
        tool=PythonREPLTool(),
        verbose=True
    )

    answer = agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
    print(answer)

    langchain.debug = False


def do_answer2():
    from langchain.agents import tool
    from datetime import date

    langchain.debug = True

    @tool
    def time(text: str) -> str:
        """Returns todays date, use this for any \
questions related to knowing todays date. \
The input should always be an empty string, \
and this function will always return todays \
date - any date mathmatics should occur \
outside this function."""
        return str(date.today())

    agent = initialize_agent(
        tools + [time],
        llm,
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=True,
        verbose = True)

    try:
        result = agent("whats the date today?")
    except:  # noqa
        print("exception on external access")

    print(result)

    langchain.debug = False


if __name__ == "__main__":
    #do_answer1()
    do_answer2()
[ "langchain.agents.initialize_agent", "langchain.tools.python.tool.PythonREPLTool", "langchain.agents.load_tools", "langchain.chat_models.ChatOpenAI" ]
[((128, 161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (151, 161), False, 'import warnings\n'), ((489, 514), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (499, 514), False, 'from langchain.chat_models import ChatOpenAI\n'), ((523, 569), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (533, 569), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((72, 85), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (83, 85), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((1666, 1800), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1682, 1800), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((936, 952), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (950, 952), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((1638, 1650), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1648, 1650), False, 'from datetime import date\n')]
from typing import List, Optional, Type

from langchain.memory import (
    ChatMessageHistory,
    ConversationBufferMemory,
    ConversationSummaryMemory,
    RedisChatMessageHistory,
    RedisEntityStore,
    VectorStoreRetrieverMemory,
)


class Memory:

    @staticmethod
    def messageHistory(path: str):
        history = ChatMessageHistory()
        return history

    @staticmethod
    def bufferMemory(path: str):
        memory = ConversationBufferMemory()
        return memory

    @staticmethod
    def chatSummary(path: str):
        memory = ConversationSummaryMemory()
        return memory
[ "langchain.memory.ConversationSummaryMemory", "langchain.memory.ConversationBufferMemory", "langchain.memory.ChatMessageHistory" ]
[((329, 349), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (347, 349), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((442, 468), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (466, 468), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((559, 586), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {}), '()\n', (584, 586), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n')]
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import HNLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredHTMLLoader
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai.llms import OpenAI
from constant import openai
import os

os.environ['OPENAI_API_KEY'] = openai

loader = PyPDFLoader("attention is all you need.pdf")
data = loader.load()
# print(data[0])

loader = CSVLoader(file_path="job_placement.csv")
data = loader.load()
# print(data[0])

loader = HNLoader("https://news.ycombinator.com")
data = loader.load()
# print(data[0])

quote = "one Machine can do the work of fifty ordinary humans, No machine can do the" \
        "work of one extraordinary human."

ct_splitter = CharacterTextSplitter(
    separator='.',
    chunk_size=24,
    chunk_overlap=3
)
# docs = ct_splitter.split_text(quote)
# print(docs)

rc_splitter = RecursiveCharacterTextSplitter(
    chunk_size=24,
    chunk_overlap=3,
)
# docs = rc_splitter.split_text(quote)
# print(docs)

loader = UnstructuredHTMLLoader("data.html")
data = loader.load()

rc_splitter = RecursiveCharacterTextSplitter(
    chunk_size=24,
    chunk_overlap=3,
    separators='.',
)
# docs = rc_splitter.split_documents(data)
# print(docs)

quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \
        "taken advantage of and are not fast enough for you."

splitter = RecursiveCharacterTextSplitter(
    chunk_size=40,
    chunk_overlap=10,
)
docs = splitter.split_text(quote)

embeddings = OpenAIEmbeddings(openai_api_key=openai)
vectordb = Chroma(
    persist_directory="data",
    embedding_function=embeddings
)
vectordb.persist()

docstorage = Chroma.from_texts(docs,embeddings)

qa = RetrievalQA.from_chain_type(
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
    chain_type="stuff",
    retriever = docstorage.as_retriever()
)

# query = "Where do lychee fruit live?"
# print(qa.invoke(query))

quote = "There is a kingdom of lycee fruit that are alive and thriving in Iceland, but they fee" \
        "taken advantage of and are not fast enough for you."

qa1 = RetrievalQAWithSourcesChain.from_chain_type(
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
    chain_type="stuff",
    retriever = docstorage.as_retriever(),
)

results = qa1({'question':'What is the primary architecture presented in the document?'},return_only_outputs=True)

print(results)
[ "langchain_community.document_loaders.PyPDFLoader", "langchain.text_splitter.CharacterTextSplitter", "langchain_openai.llms.OpenAI", "langchain_community.document_loaders.csv_loader.CSVLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_community.document_loaders.UnstructuredHTMLLoader", "langchain_community.document_loaders.HNLoader", "langchain_community.vectorstores.Chroma.from_texts", "langchain_community.vectorstores.Chroma", "langchain_openai.embeddings.OpenAIEmbeddings" ]
[((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': '"""job_placement.csv"""'}), "(file_path='job_placement.csv')\n", (847, 878), False, 'from langchain_community.document_loaders.csv_loader import CSVLoader\n'), ((931, 971), 'langchain_community.document_loaders.HNLoader', 'HNLoader', (['"""https://news.ycombinator.com"""'], {}), "('https://news.ycombinator.com')\n", (939, 971), False, 'from langchain_community.document_loaders import HNLoader\n'), ((1166, 1234), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(24)', 'chunk_overlap': '(3)'}), "(separator='.', chunk_size=24, chunk_overlap=3)\n", (1187, 1234), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1327, 1389), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)'}), '(chunk_size=24, chunk_overlap=3)\n', (1357, 1389), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1473, 1508), 'langchain_community.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['"""data.html"""'], {}), "('data.html')\n", (1495, 1508), False, 'from langchain_community.document_loaders import UnstructuredHTMLLoader\n'), ((1548, 1626), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)', 'separators': '"""."""'}), "(chunk_size=24, chunk_overlap=3, separators='.')\n", (1578, 1626), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1889, 1952), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(40)', 'chunk_overlap': '(10)'}), '(chunk_size=40, chunk_overlap=10)\n', (1919, 1952), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2020, 2059), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai'}), '(openai_api_key=openai)\n', (2036, 2059), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((2074, 2137), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""data"""', 'embedding_function': 'embeddings'}), "(persist_directory='data', embedding_function=embeddings)\n", (2080, 2137), False, 'from langchain_community.vectorstores import Chroma\n'), ((2189, 2224), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2206, 2224), False, 'from langchain_community.vectorstores import Chroma\n'), ((2272, 2315), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2278, 2315), False, 'from langchain_openai.llms import OpenAI\n'), ((2688, 2731), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2694, 2731), False, 'from langchain_openai.llms import OpenAI\n')]
from __future__ import annotations from typing import Any, TypeVar from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @classmethod def is_lc_serializable(cls) -> bool: return True parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, max_retries: int = 1, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. Returns: OutputFixingParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
[ "langchain_core.exceptions.OutputParserException", "langchain.chains.llm.LLMChain" ]
[((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')]
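The parser above is easiest to follow next to a concrete call, so here is a short hedged usage sketch; the Pydantic schema, the malformed completion, and the choice of OpenAI as the fixing model are illustrative assumptions rather than anything taken from the source.

from langchain.output_parsers import OutputFixingParser, PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai.llms import OpenAI

class Joke(BaseModel):
    setup: str = Field(description="question that sets up the joke")
    punchline: str = Field(description="answer that resolves the joke")

base_parser = PydanticOutputParser(pydantic_object=Joke)
fixing_parser = OutputFixingParser.from_llm(llm=OpenAI(temperature=0), parser=base_parser)

# Single quotes make this invalid JSON: the base parser raises
# OutputParserException, so the wrapped LLMChain is asked (up to max_retries
# times) to repair the completion before parsing is retried.
bad_completion = "{'setup': 'Why did the chicken cross the road?', 'punchline': 'To get to the other side.'}"
joke = fixing_parser.parse(bad_completion)

The same retry loop is exposed asynchronously through aparse.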
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from meilisearch import Client def _create_client( client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, ) -> Client: try: import meilisearch except ImportError: raise ImportError( "Could not import meilisearch python package. " "Please install it with `pip install meilisearch`." ) if not client: url = url or get_from_env("url", "MEILI_HTTP_ADDR") try: api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY") except Exception: pass client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( f"client should be an instance of meilisearch.Client, " f"got {type(client)}" ) try: client.version() except ValueError as e: raise ValueError(f"Failed to connect to Meilisearch: {e}") return client class Meilisearch(VectorStore): """`Meilisearch` vector store. To use this, you need to have `meilisearch` python package installed, and a running Meilisearch instance. To learn more about Meilisearch Python, refer to the in-depth Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/. See the following documentation for how to run a Meilisearch instance: https://www.meilisearch.com/docs/learn/getting_started/quick_start. Example: .. code-block:: python from langchain.vectorstores import Meilisearch from langchain.embeddings.openai import OpenAIEmbeddings import meilisearch # api_key is optional; provide it if your meilisearch instance requires it client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() vectorstore = Meilisearch( embedding=embeddings, client=client, index_name='langchain_demo', text_key='text') """ def __init__( self, embedding: Embeddings, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", text_key: str = "text", metadata_key: str = "metadata", ): """Initialize with Meilisearch client.""" client = _create_client(client=client, url=url, api_key=api_key) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._metadata_key = metadata_key def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add them to the vector store. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]]): Optional list of metadata. Defaults to None. ids Optional[List[str]]: Optional list of IDs. Defaults to None. Returns: List[str]: List of IDs of the texts added to the vectorstore. 
""" texts = list(texts) # Embed and create the documents docs = [] if ids is None: ids = [uuid.uuid4().hex for _ in texts] if metadatas is None: metadatas = [{} for _ in texts] embedding_vectors = self._embedding.embed_documents(texts) for i, text in enumerate(texts): id = ids[i] metadata = metadatas[i] metadata[self._text_key] = text embedding = embedding_vectors[i] docs.append( { "id": id, "_vectors": embedding, f"{self._metadata_key}": metadata, } ) # Send to Meilisearch self._client.index(str(self._index_name)).add_documents(docs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to the query. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to the query, along with scores. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ _query = self._embedding.embed_query(query) docs = self.similarity_search_by_vector_with_scores( embedding=_query, k=k, filter=filter, kwargs=kwargs, ) return docs def similarity_search_by_vector_with_scores( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. """ docs = [] results = self._client.index(str(self._index_name)).search( "", {"vector": embedding, "limit": k, "filter": filter} ) for result in results["hits"]: metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) semantic_score = result["_semanticScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. 
""" docs = self.similarity_search_by_vector_with_scores( embedding=embedding, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs] @classmethod def from_texts( cls: Type[Meilisearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", ids: Optional[List[str]] = None, text_key: Optional[str] = "text", metadata_key: Optional[str] = "metadata", **kwargs: Any, ) -> Meilisearch: """Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Meilisearch from langchain.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() docsearch = Meilisearch.from_texts( client=client, embeddings=embeddings, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls( embedding=embedding, client=client, index_name=index_name, ) vectorstore.add_texts( texts=texts, metadatas=metadatas, ids=ids, text_key=text_key, metadata_key=metadata_key, ) return vectorstore
[ "langchain.utils.get_from_env", "langchain.docstore.document.Document" ]
[((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (788, 814), False, 'from langchain.utils import get_from_env\n'), ((861, 904), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""MEILI_MASTER_KEY"""'], {}), "('api_key', 'MEILI_MASTER_KEY')\n", (873, 904), False, 'from langchain.utils import get_from_env\n'), ((3872, 3884), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3882, 3884), False, 'import uuid\n'), ((7512, 7558), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7520, 7558), False, 'from langchain.docstore.document import Document\n')]
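As a complement to the class docstring, here is a hedged end-to-end sketch of the wrapper above; the Meilisearch URL, master key, and sample texts are placeholders, and a running Meilisearch instance with vector search enabled is assumed. The call spells out the texts and embedding arguments required by the from_texts signature defined above, which the abbreviated example in its docstring glosses over.

import meilisearch
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Meilisearch

# Placeholder connection details; point these at your own instance.
client = meilisearch.Client(url="http://127.0.0.1:7700", api_key="masterKey")
embeddings = OpenAIEmbeddings()

store = Meilisearch.from_texts(
    texts=[
        "LangChain ships a Meilisearch vector store",
        "Each document stores its embedding under the _vectors field",
    ],
    embedding=embeddings,
    client=client,
    index_name="langchain-demo",
)
docs_and_scores = store.similarity_search_with_score("Where are embeddings stored?", k=2)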
## This is a fork/based from https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6 from io import StringIO import sys import os from typing import Dict, Optional from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.agents.tools import Tool from langchain.llms import OpenAI base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo') class PythonREPL: """Simulates a standalone Python REPL.""" def __init__(self): pass def run(self, command: str) -> str: """Run command and returns anything printed.""" old_stdout = sys.stdout sys.stdout = mystdout = StringIO() try: exec(command, globals()) sys.stdout = old_stdout output = mystdout.getvalue() except Exception as e: sys.stdout = old_stdout output = str(e) return output llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name) python_repl = Tool( "Python REPL", PythonREPL().run, """A Python shell. Use this to execute python commands. Input should be a valid python command. If you expect output it should be printed out.""", ) tools = [python_repl] agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True) agent.run("What is the 10th fibonacci number?")
[ "langchain.agents.initialize_agent", "langchain.llms.OpenAI" ]
[((348, 409), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (362, 409), False, 'import os\n'), ((423, 468), 'os.environ.get', 'os.environ.get', (['"""MODEL_NAME"""', '"""gpt-3.5-turbo"""'], {}), "('MODEL_NAME', 'gpt-3.5-turbo')\n", (437, 468), False, 'import os\n'), ((1003, 1076), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.0)', 'openai_api_base': 'base_path', 'model_name': 'model_name'}), '(temperature=0.0, openai_api_base=base_path, model_name=model_name)\n', (1009, 1076), False, 'from langchain.llms import OpenAI\n'), ((1345, 1424), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (1361, 1424), False, 'from langchain.agents import initialize_agent\n'), ((741, 751), 'io.StringIO', 'StringIO', ([], {}), '()\n', (749, 751), False, 'from io import StringIO\n')]
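Since the Tool above is just a thin wrapper around PythonREPL().run, it can be sanity-checked on its own before it is handed to the agent; this check is an illustrative addition that reuses python_repl from the script above.

# Runs inside the same process via exec() and returns the captured stdout.
print(python_repl.run("print(sum(range(10)))"))  # expected to print 45

Because the REPL executes arbitrary code in the current process, it should only be exposed to trusted inputs.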
import time from typing import List import pandas as pd from langchain.schema import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.vectorstores import VectorStore from mindsdb.integrations.handlers.rag_handler.settings import ( PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents, ) from mindsdb.utilities import log logger = log.getLogger(__name__) def validate_document(doc) -> bool: """Check an individual document.""" # Example checks if not isinstance(doc, Document): return False if not doc.page_content: return False return True def validate_documents(documents) -> bool: """Validate document list format.""" if not isinstance(documents, list): return False if not documents: return False # Check fields/format of a document return all([validate_document(doc) for doc in documents]) class RAGIngestor: """A class for converting a dataframe and/or url to a vectorstore embedded with a given embeddings model""" def __init__( self, args: RAGBaseParameters, df: pd.DataFrame, ): self.args = args self.df = df self.embeddings_model_name = args.embeddings_model_name self.vector_store = VectorStoreFactory.get_vectorstore_class( args.vector_store_name ) def split_documents(self, chunk_size, chunk_overlap) -> list: # Load documents and split in chunks logger.info(f"Loading documents from input data") documents = [] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) if self.df is not None: # if user provides a dataframe, load documents from dataframe documents.extend( df_to_documents( df=self.df, page_content_columns=self.args.context_columns, url_column_name=self.args.url_column_name, ) ) if self.args.url: # if user provides a url, load documents from url documents.extend(url_to_documents(self.args.url)) n_tokens = sum([len(doc.page_content) for doc in documents]) # split documents into chunks of text texts = text_splitter.split_documents(documents) logger.info(f"Loaded {len(documents)} documents from input data") logger.info(f"Total number of tokens: {n_tokens}") logger.info(f"Split into {len(texts)} chunks of text (tokens)") return texts def create_db_from_documents(self, documents, embeddings_model) -> VectorStore: """Create DB from documents.""" if self.args.vector_store_name == "chromadb": return self.vector_store.from_documents( documents=documents, embedding=embeddings_model, client=get_chroma_client( persist_directory=self.args.vector_store_storage_path ), collection_name=self.args.collection_name, ) else: return self.create_db_from_texts(documents, embeddings_model) def create_db_from_texts(self, documents, embeddings_model) -> VectorStore: """Create DB from text content.""" texts = [doc.page_content for doc in documents] metadata = [doc.metadata for doc in documents] return self.vector_store.from_texts( texts=texts, embedding=embeddings_model, metadatas=metadata ) @staticmethod def _create_batch_embeddings(documents: List[Document], embeddings_batch_size): """ create batch of document embeddings """ for i in range(0, len(documents), embeddings_batch_size): yield documents[i: i + embeddings_batch_size] def embeddings_to_vectordb(self) -> None: """Create vectorstore from documents and store locally.""" start_time = time.time() # Load documents and splits in chunks (if not in evaluation_type mode) documents = self.split_documents( chunk_size=self.args.chunk_size, 
chunk_overlap=self.args.chunk_overlap ) # Load embeddings model embeddings_model = load_embeddings_model( self.embeddings_model_name, self.args.use_gpu ) logger.info(f"Creating vectorstore from documents") if not validate_documents(documents): raise ValueError("Invalid documents") try: db = self.create_db_from_documents(documents, embeddings_model) except Exception as e: raise Exception( f"Error loading embeddings to {self.args.vector_store_name}: {e}" ) config = PersistedVectorStoreSaverConfig( vector_store_name=self.args.vector_store_name, vector_store=db, persist_directory=self.args.vector_store_storage_path, collection_name=self.args.collection_name, ) vector_store_saver = PersistedVectorStoreSaver(config) vector_store_saver.save_vector_store(db) db = None # Free up memory end_time = time.time() elapsed_time = round(end_time - start_time) logger.info(f"Finished creating {self.args.vector_store_name} from texts, it has been " f"persisted to {self.args.vector_store_storage_path}") time_minutes = round(elapsed_time / 60) if time_minutes > 1: logger.info(f"Elapsed time: {time_minutes} minutes") else: logger.info(f"Elapsed time: {elapsed_time} seconds")
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((539, 562), 'mindsdb.utilities.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (552, 562), False, 'from mindsdb.utilities import log\n'), ((1455, 1519), 'mindsdb.integrations.handlers.rag_handler.settings.VectorStoreFactory.get_vectorstore_class', 'VectorStoreFactory.get_vectorstore_class', (['args.vector_store_name'], {}), '(args.vector_store_name)\n', (1495, 1519), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((1761, 1848), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (1791, 1848), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4190, 4201), 'time.time', 'time.time', ([], {}), '()\n', (4199, 4201), False, 'import time\n'), ((4477, 4545), 'mindsdb.integrations.handlers.rag_handler.settings.load_embeddings_model', 'load_embeddings_model', (['self.embeddings_model_name', 'self.args.use_gpu'], {}), '(self.embeddings_model_name, self.args.use_gpu)\n', (4498, 4545), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((4990, 5193), 'mindsdb.integrations.handlers.rag_handler.settings.PersistedVectorStoreSaverConfig', 'PersistedVectorStoreSaverConfig', ([], {'vector_store_name': 'self.args.vector_store_name', 'vector_store': 'db', 'persist_directory': 'self.args.vector_store_storage_path', 'collection_name': 'self.args.collection_name'}), '(vector_store_name=self.args.\n vector_store_name, vector_store=db, persist_directory=self.args.\n vector_store_storage_path, collection_name=self.args.collection_name)\n', (5021, 5193), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((5273, 5306), 'mindsdb.integrations.handlers.rag_handler.settings.PersistedVectorStoreSaver', 'PersistedVectorStoreSaver', (['config'], {}), '(config)\n', (5298, 5306), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((5414, 5425), 'time.time', 'time.time', ([], {}), '()\n', (5423, 5425), False, 'import time\n'), ((2019, 2141), 'mindsdb.integrations.handlers.rag_handler.settings.df_to_documents', 'df_to_documents', ([], {'df': 'self.df', 'page_content_columns': 'self.args.context_columns', 'url_column_name': 'self.args.url_column_name'}), '(df=self.df, page_content_columns=self.args.context_columns,\n url_column_name=self.args.url_column_name)\n', (2034, 2141), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((2349, 2380), 'mindsdb.integrations.handlers.rag_handler.settings.url_to_documents', 
'url_to_documents', (['self.args.url'], {}), '(self.args.url)\n', (2365, 2380), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n'), ((3121, 3193), 'mindsdb.integrations.handlers.rag_handler.settings.get_chroma_client', 'get_chroma_client', ([], {'persist_directory': 'self.args.vector_store_storage_path'}), '(persist_directory=self.args.vector_store_storage_path)\n', (3138, 3193), False, 'from mindsdb.integrations.handlers.rag_handler.settings import PersistedVectorStoreSaver, PersistedVectorStoreSaverConfig, RAGBaseParameters, VectorStoreFactory, df_to_documents, get_chroma_client, load_embeddings_model, url_to_documents\n')]
""" Multilingual retrieval based conversation system backed by ChatGPT """ import argparse import os from colossalqa.data_loader.document_loader import DocumentLoader from colossalqa.memory import ConversationBufferWithSummary from colossalqa.retriever import CustomRetriever from langchain import LLMChain from langchain.chains import RetrievalQA from langchain.embeddings import HuggingFaceEmbeddings from langchain.llms import OpenAI from langchain.prompts.prompt import PromptTemplate from langchain.text_splitter import RecursiveCharacterTextSplitter if __name__ == "__main__": parser = argparse.ArgumentParser(description="Multilingual retrieval based conversation system backed by ChatGPT") parser.add_argument("--open_ai_key_path", type=str, default=None, help="path to the model") parser.add_argument( "--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing" ) args = parser.parse_args() if not os.path.exists(args.sql_file_path): os.makedirs(args.sql_file_path) # Setup openai key # Set env var OPENAI_API_KEY or load from a file openai_key = open(args.open_ai_key_path).read() os.environ["OPENAI_API_KEY"] = openai_key llm = OpenAI(temperature=0.6) information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True) # VectorDB embedding = HuggingFaceEmbeddings( model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False} ) # Define memory with summarization ability memory = ConversationBufferWithSummary(llm=llm) # Load data to vector store print("Select files for constructing retriever") documents = [] while True: file = input("Enter a file path or press Enter directory without input to exit:").strip() if file == "": break data_name = input("Enter a short description of the data:") retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0) splits = text_splitter.split_documents(retriever_data) documents.extend(splits) # Create retriever information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding) prompt_template = """Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If the answer cannot be inferred based on the given context, please don't share false information. Use the context and chat history to respond to the human's input at the end or carry on the conversation. You should generate one response only. No following up is needed. context: {context} chat history {chat_history} Human: {question} Assistant:""" prompt_template_disambiguate = """You are a helpful, respectful and honest assistant. You always follow the instruction. Please replace any ambiguous references in the given sentence with the specific names or entities mentioned in the chat history or just output the original sentence if no chat history is provided or if the sentence doesn't contain ambiguous references. Your output should be the disambiguated sentence itself (in the same line as "disambiguated sentence:") and contain nothing else. Here is an example: Chat history: Human: I have a friend, Mike. Do you know him? Assistant: Yes, I know a person named Mike sentence: What's his favorite food? disambiguated sentence: What's Mike's favorite food? 
END OF EXAMPLE Chat history: {chat_history} sentence: {input} disambiguated sentence:""" PROMPT = PromptTemplate(template=prompt_template, input_variables=["question", "chat_history", "context"]) memory.initiate_document_retrieval_chain( llm, PROMPT, information_retriever, chain_type_kwargs={ "chat_history": "", }, ) PROMPT_DISAMBIGUATE = PromptTemplate( template=prompt_template_disambiguate, input_variables=["chat_history", "input"] ) llm_chain = RetrievalQA.from_chain_type( llm=llm, verbose=False, chain_type="stuff", retriever=information_retriever, chain_type_kwargs={"prompt": PROMPT, "memory": memory}, ) llm_chain_disambiguate = LLMChain(llm=llm, prompt=PROMPT_DISAMBIGUATE) def disambiguity(input): out = llm_chain_disambiguate.run({"input": input, "chat_history": memory.buffer}) return out.split("\n")[0] information_retriever.set_rephrase_handler(disambiguity) while True: user_input = input("User: ") if " end " in user_input: print("Agent: Happy to chat with you :)") break agent_response = llm_chain.run(user_input) agent_response = agent_response.split("\n")[0] print(f"Agent: {agent_response}")
[ "langchain.prompts.prompt.PromptTemplate", "langchain.LLMChain", "langchain.embeddings.HuggingFaceEmbeddings", "langchain.chains.RetrievalQA.from_chain_type", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.llms.OpenAI" ]
[((599, 709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Multilingual retrieval based conversation system backed by ChatGPT"""'}), "(description=\n 'Multilingual retrieval based conversation system backed by ChatGPT')\n", (622, 709), False, 'import argparse\n'), ((1258, 1281), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (1264, 1281), False, 'from langchain.llms import OpenAI\n'), ((1311, 1379), 'colossalqa.retriever.CustomRetriever', 'CustomRetriever', ([], {'k': '(3)', 'sql_file_path': 'args.sql_file_path', 'verbose': '(True)'}), '(k=3, sql_file_path=args.sql_file_path, verbose=True)\n', (1326, 1379), False, 'from colossalqa.retriever import CustomRetriever\n'), ((1411, 1546), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""moka-ai/m3e-base"""', 'model_kwargs': "{'device': 'cpu'}", 'encode_kwargs': "{'normalize_embeddings': False}"}), "(model_name='moka-ai/m3e-base', model_kwargs={'device':\n 'cpu'}, encode_kwargs={'normalize_embeddings': False})\n", (1432, 1546), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1618, 1656), 'colossalqa.memory.ConversationBufferWithSummary', 'ConversationBufferWithSummary', ([], {'llm': 'llm'}), '(llm=llm)\n', (1647, 1656), False, 'from colossalqa.memory import ConversationBufferWithSummary\n'), ((3951, 4052), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['question', 'chat_history', 'context']"}), "(template=prompt_template, input_variables=['question',\n 'chat_history', 'context'])\n", (3965, 4052), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((4260, 4361), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template_disambiguate', 'input_variables': "['chat_history', 'input']"}), "(template=prompt_template_disambiguate, input_variables=[\n 'chat_history', 'input'])\n", (4274, 4361), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((4388, 4556), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'verbose': '(False)', 'chain_type': '"""stuff"""', 'retriever': 'information_retriever', 'chain_type_kwargs': "{'prompt': PROMPT, 'memory': memory}"}), "(llm=llm, verbose=False, chain_type='stuff',\n retriever=information_retriever, chain_type_kwargs={'prompt': PROMPT,\n 'memory': memory})\n", (4415, 4556), False, 'from langchain.chains import RetrievalQA\n'), ((4625, 4670), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT_DISAMBIGUATE'}), '(llm=llm, prompt=PROMPT_DISAMBIGUATE)\n', (4633, 4670), False, 'from langchain import LLMChain\n'), ((996, 1030), 'os.path.exists', 'os.path.exists', (['args.sql_file_path'], {}), '(args.sql_file_path)\n', (1010, 1030), False, 'import os\n'), ((1040, 1071), 'os.makedirs', 'os.makedirs', (['args.sql_file_path'], {}), '(args.sql_file_path)\n', (1051, 1071), False, 'import os\n'), ((2114, 2177), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(0)'}), '(chunk_size=200, chunk_overlap=0)\n', (2144, 2177), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n')]
from templates.common.suffix import suffix
from templates.common.format_instructions import format_instructions
from templates.common.docs_system_instructions import docs_system_instructions
from langchain.schema import (
    # AIMessage,
    HumanMessage,
    SystemMessage
)
from langchain.tools.json.tool import JsonSpec
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor
from langchain.chains import LLMChain
from config.config import config
import openai  # required
from dotenv import load_dotenv

load_dotenv()


class OpenAPIExplorerTool:
    @staticmethod
    def create_tools(docs):
        json_spec = JsonSpec(dict_=docs)
        json_toolkit = JsonToolkit(spec=json_spec)
        tools = json_toolkit.get_tools()
        return tools


class PipedreamOpenAPIAgent:
    def __init__(self, docs, templates, auth_example, parsed_common_files):
        system_instructions = format_template(
            f"{templates.system_instructions(auth_example, parsed_common_files)}\n{docs_system_instructions}")
        tools = OpenAPIExplorerTool.create_tools(docs)
        tool_names = [tool.name for tool in tools]
        prompt_template = ZeroShotAgent.create_prompt(
            tools=tools,
            prefix=system_instructions,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=['input', 'agent_scratchpad']
        )
        llm_chain = LLMChain(llm=get_llm(), prompt=prompt_template)
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
        verbose = True if config['logging']['level'] == 'DEBUG' else False
        self.agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent, tools=tools, verbose=verbose)

    def run(self, input):
        try:
            result = self.agent_executor.run(input)
        except Exception as e:
            result = str(e)
            if "I don't know" in result:
                return "I don't know"
            if '```' not in result:
                raise e
        return format_result(result)


def format_template(text):
    return text.replace("{", "{{").replace("}", "}}")  # escape curly braces


def format_result(result):
    if '```' in result:
        if '```javascript' in result:
            result = result.split('```javascript')[1].split('```')[0].strip()
        else:
            result = result.split('```')[1].split('```')[0].strip()
    return result


def create_user_prompt(prompt, urls_content):
    if len(urls_content) == 0:
        return prompt + "\n\n"
    user_prompt = f"{prompt}\n\n## API docs\n\n"
    for item in urls_content:
        user_prompt += f"\n\n### {item['url']}\n\n{item['content']}"
    return user_prompt + "\n\n"


def get_llm():
    if config['openai_api_type'] == "azure":
        azure_config = config["azure"]
        return AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
                               model_name=azure_config["model"],
                               temperature=config["temperature"],
                               request_timeout=300)
    else:
        openai_config = config["openai"]
        print(f"Using OpenAI API: {openai_config['model']}")
        return ChatOpenAI(
            model_name=openai_config["model"],
            temperature=config["temperature"])


def ask_agent(prompt, docs, templates, auth_example, parsed_common_files, urls_content):
    agent = PipedreamOpenAPIAgent(
        docs, templates, auth_example, parsed_common_files)
    user_prompt = create_user_prompt(prompt, urls_content)
    result = agent.run(user_prompt)
    return result


def no_docs(prompt, templates, auth_example, parsed_common_files, urls_content, normal_order=True):
    user_prompt = create_user_prompt(prompt, urls_content)
    pd_instructions = format_template(
        templates.system_instructions(auth_example, parsed_common_files))
    result = get_llm()(messages=[
        SystemMessage(content="You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."),
        HumanMessage(content=user_prompt + pd_instructions if normal_order else pd_instructions + user_prompt),
    ])
    return format_result(result.content)
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.agents.agent_toolkits.json.toolkit.JsonToolkit", "langchain.agents.ZeroShotAgent.create_prompt", "langchain.agents.ZeroShotAgent", "langchain.chat_models.ChatOpenAI", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage", "langchain.chat_models.AzureChatOpenAI", "langchain.tools.json.tool.JsonSpec" ]
[((701, 714), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (712, 714), False, 'from dotenv import load_dotenv\n'), ((810, 830), 'langchain.tools.json.tool.JsonSpec', 'JsonSpec', ([], {'dict_': 'docs'}), '(dict_=docs)\n', (818, 830), False, 'from langchain.tools.json.tool import JsonSpec\n'), ((854, 881), 'langchain.agents.agent_toolkits.json.toolkit.JsonToolkit', 'JsonToolkit', ([], {'spec': 'json_spec'}), '(spec=json_spec)\n', (865, 881), False, 'from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit\n'), ((1343, 1524), 'langchain.agents.ZeroShotAgent.create_prompt', 'ZeroShotAgent.create_prompt', ([], {'tools': 'tools', 'prefix': 'system_instructions', 'suffix': 'suffix', 'format_instructions': 'format_instructions', 'input_variables': "['input', 'agent_scratchpad']"}), "(tools=tools, prefix=system_instructions, suffix\n =suffix, format_instructions=format_instructions, input_variables=[\n 'input', 'agent_scratchpad'])\n", (1370, 1524), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((1670, 1730), 'langchain.agents.ZeroShotAgent', 'ZeroShotAgent', ([], {'llm_chain': 'llm_chain', 'allowed_tools': 'tool_names'}), '(llm_chain=llm_chain, allowed_tools=tool_names)\n', (1683, 1730), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((1837, 1914), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': 'verbose'}), '(agent=agent, tools=tools, verbose=verbose)\n', (1871, 1914), False, 'from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor\n'), ((3038, 3201), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': "azure_config['deployment_name']", 'model_name': "azure_config['model']", 'temperature': "config['temperature']", 'request_timeout': '(300)'}), "(deployment_name=azure_config['deployment_name'], model_name\n =azure_config['model'], temperature=config['temperature'],\n request_timeout=300)\n", (3053, 3201), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n'), ((3351, 3436), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "openai_config['model']", 'temperature': "config['temperature']"}), "(model_name=openai_config['model'], temperature=config['temperature']\n )\n", (3361, 3436), False, 'from langchain.chat_models import ChatOpenAI, AzureChatOpenAI\n'), ((4061, 4430), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it\'s important that it\'s accurate and complete."""'}), '(content=\n "You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. 
This code will be shipped directly to production, so it\'s important that it\'s accurate and complete."\n )\n', (4074, 4430), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((4430, 4537), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '(user_prompt + pd_instructions if normal_order else pd_instructions +\n user_prompt)'}), '(content=user_prompt + pd_instructions if normal_order else \n pd_instructions + user_prompt)\n', (4442, 4537), False, 'from langchain.schema import HumanMessage, SystemMessage\n')]
import os
import threading

from chainlit.config import config
from chainlit.logger import logger


def init_lc_cache():
    use_cache = config.project.cache is True and config.run.no_cache is False

    if use_cache:
        try:
            import langchain
        except ImportError:
            return

        from langchain.cache import SQLiteCache
        from langchain.globals import set_llm_cache

        if config.project.lc_cache_path is not None:
            set_llm_cache(SQLiteCache(database_path=config.project.lc_cache_path))
            if not os.path.exists(config.project.lc_cache_path):
                logger.info(
                    f"LangChain cache created at: {config.project.lc_cache_path}"
                )


_cache = {}
_cache_lock = threading.Lock()


def cache(func):
    def wrapper(*args, **kwargs):
        # Create a cache key based on the function name, arguments, and keyword arguments
        cache_key = (
            (func.__name__,) + args + tuple((k, v) for k, v in sorted(kwargs.items()))
        )
        with _cache_lock:
            # Check if the result is already in the cache
            if cache_key not in _cache:
                # If not, call the function and store the result in the cache
                _cache[cache_key] = func(*args, **kwargs)
            return _cache[cache_key]

    return wrapper
[ "langchain.cache.SQLiteCache" ]
[((767, 783), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (781, 783), False, 'import threading\n'), ((487, 542), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'config.project.lc_cache_path'}), '(database_path=config.project.lc_cache_path)\n', (498, 542), False, 'from langchain.cache import SQLiteCache\n'), ((564, 608), 'os.path.exists', 'os.path.exists', (['config.project.lc_cache_path'], {}), '(config.project.lc_cache_path)\n', (578, 608), False, 'import os\n'), ((626, 700), 'chainlit.logger.logger.info', 'logger.info', (['f"""LangChain cache created at: {config.project.lc_cache_path}"""'], {}), "(f'LangChain cache created at: {config.project.lc_cache_path}')\n", (637, 700), False, 'from chainlit.logger import logger\n')]
import json
from typing import Any, List, Tuple

import requests

from taskweaver.plugin import Plugin, register_plugin

# response entry format: (title, url, snippet)
ResponseEntry = Tuple[str, str, str]


def browse_page(
    query: str,
    urls: List[str],
    top_k: int = 3,
    chunk_size: int = 1000,
    chunk_overlap: int = 250,
) -> list[dict[str, Any]]:
    try:
        from langchain.text_splitter import RecursiveCharacterTextSplitter
        from langchain_community.document_loaders import AsyncHtmlLoader
        from langchain_community.document_transformers import Html2TextTransformer
    except ImportError:
        raise ImportError("Please install langchain/langchain-community first.")

    loader = AsyncHtmlLoader(web_path=urls)
    docs = loader.load()

    html2text = Html2TextTransformer()
    docs_transformed = html2text.transform_documents(docs)

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )

    # Split
    splits = text_splitter.split_documents(docs_transformed)

    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    vector_store = FAISS.from_documents(
        splits,
        HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2"),
    )

    result = vector_store.similarity_search(
        query=query,
        k=top_k,
    )

    chunks = [
        {
            "metadata": r.metadata,
            "snippet": r.page_content,
        }
        for r in result
    ]
    return chunks


@register_plugin
class WebSearch(Plugin):
    def search_query(self, query: str) -> List[ResponseEntry]:
        api_provider = self.config.get("api_provider", "google_custom_search")
        result_count = int(self.config.get("result_count", 3))
        if api_provider == "google_custom_search":
            return self._search_google_custom_search(query, cnt=result_count)
        elif api_provider == "bing":
            return self._search_bing(query, cnt=result_count)
        else:
            raise ValueError("Invalid API provider. Please check your config file.")

    def __call__(self, queries: List[str], browse: bool = True) -> str:
        query_results = []
        query_urls = set()
        for query in queries:
            query_results.extend([r for r in self.search_query(query) if r[1] not in query_urls])
            query_urls.update([r[1] for r in query_results])
        if not browse:
            return f"WebSearch has done searching for `{queries}`.\n" + self.ctx.wrap_text_with_delimiter_temporal(
                "\n```json\n" + json.dumps(query_results, indent=4) + "```\n",
            )
        else:
            return f"WebSearch has done searching for `{queries}`.\n" + self.ctx.wrap_text_with_delimiter_temporal(
                "\n```json\n" + json.dumps(browse_page(",".join(queries), list(query_urls)), indent=4) + "```\n",
            )

    def _search_google_custom_search(self, query: str, cnt: int) -> List[ResponseEntry]:
        api_key = self.config.get("google_api_key")
        search_engine_id = self.config.get("google_search_engine_id")
        url = f"https://www.googleapis.com/customsearch/v1?key={api_key}&cx={search_engine_id}&q={query}"
        if cnt > 0:
            url += f"&num={cnt}"
        response = requests.get(url)
        result_list: List[ResponseEntry] = []
        for item in response.json()["items"]:
            result_list.append((item["title"], item["link"], item["snippet"]))
        return result_list

    def _search_bing(self, query: str, cnt: int) -> List[ResponseEntry]:
        api_key = self.config.get("bing_api_key")
        url = f"https://api.bing.microsoft.com/v7.0/search?q={query}"
        if cnt > 0:
            url += f"&count={cnt}"
        response = requests.get(url, headers={"Ocp-Apim-Subscription-Key": api_key})
        result_list: List[ResponseEntry] = []
        for item in response.json()["webPages"]["value"]:
            result_list.append((item["name"], item["url"], item["snippet"]))
        return result_list
[ "langchain_community.document_transformers.Html2TextTransformer", "langchain_community.document_loaders.AsyncHtmlLoader", "langchain_community.embeddings.HuggingFaceEmbeddings", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((725, 755), 'langchain_community.document_loaders.AsyncHtmlLoader', 'AsyncHtmlLoader', ([], {'web_path': 'urls'}), '(web_path=urls)\n', (740, 755), False, 'from langchain_community.document_loaders import AsyncHtmlLoader\n'), ((798, 820), 'langchain_community.document_transformers.Html2TextTransformer', 'Html2TextTransformer', ([], {}), '()\n', (818, 820), False, 'from langchain_community.document_transformers import Html2TextTransformer\n'), ((901, 988), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (931, 988), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1272, 1324), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1293, 1324), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((3355, 3372), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3367, 3372), False, 'import requests\n'), ((3839, 3904), 'requests.get', 'requests.get', (['url'], {'headers': "{'Ocp-Apim-Subscription-Key': api_key}"}), "(url, headers={'Ocp-Apim-Subscription-Key': api_key})\n", (3851, 3904), False, 'import requests\n'), ((2646, 2681), 'json.dumps', 'json.dumps', (['query_results'], {'indent': '(4)'}), '(query_results, indent=4)\n', (2656, 2681), False, 'import json\n')]
import os
from typing import Optional

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, HumanMessage
from rebyte_langchain.rebyte_langchain import RebyteEndpoint

from realtime_ai_character.llm.base import (
    AsyncCallbackAudioHandler,
    AsyncCallbackTextHandler,
    LLM,
)
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed

logger = get_logger(__name__)


class RebyteLlm(LLM):
    def __init__(self):
        self.rebyte_api_key = os.getenv("REBYTE_API_KEY", "")
        self.chat_rebyte = RebyteEndpoint(
            rebyte_api_key=self.rebyte_api_key, client=None, streaming=True
        )
        self.config = {}

    def get_config(self):
        return self.config

    def _set_character_config(self, character: Character):
        self.chat_rebyte.project_id = character.rebyte_api_project_id
        self.chat_rebyte.agent_id = character.rebyte_api_agent_id
        if character.rebyte_api_version is not None:
            self.chat_rebyte.version = character.rebyte_api_version

    def _set_user_config(self, user_id: str):
        self.chat_rebyte.session_id = user_id

    @timed
    async def achat(
        self,
        history: list[BaseMessage],
        user_input: str,
        user_id: str,
        character: Character,
        callback: AsyncCallbackTextHandler,
        audioCallback: Optional[AsyncCallbackAudioHandler] = None,
        metadata: Optional[dict] = None,
        *args,
        **kwargs,
    ) -> str:
        # 1. Add user input to history
        # delete the first system message in history. just use the system prompt in rebyte platform
        history.pop(0)
        history.append(HumanMessage(content=user_input))

        # 2. Generate response
        # set project_id and agent_id for character
        self._set_character_config(character=character)
        # set session_id for user
        self._set_user_config(user_id)

        callbacks = [callback, StreamingStdOutCallbackHandler()]
        if audioCallback is not None:
            callbacks.append(audioCallback)

        response = await self.chat_rebyte.agenerate(
            [history],
            callbacks=callbacks,
            metadata=metadata,
        )
        logger.info(f"Response: {response}")
        return response.generations[0][0].text
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.schema.HumanMessage" ]
[((473, 493), 'realtime_ai_character.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (483, 493), False, 'from realtime_ai_character.logger import get_logger\n'), ((572, 603), 'os.getenv', 'os.getenv', (['"""REBYTE_API_KEY"""', '""""""'], {}), "('REBYTE_API_KEY', '')\n", (581, 603), False, 'import os\n'), ((632, 711), 'rebyte_langchain.rebyte_langchain.RebyteEndpoint', 'RebyteEndpoint', ([], {'rebyte_api_key': 'self.rebyte_api_key', 'client': 'None', 'streaming': '(True)'}), '(rebyte_api_key=self.rebyte_api_key, client=None, streaming=True)\n', (646, 711), False, 'from rebyte_langchain.rebyte_langchain import RebyteEndpoint\n'), ((1768, 1800), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (1780, 1800), False, 'from langchain.schema import BaseMessage, HumanMessage\n'), ((2046, 2078), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (2076, 2078), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
from celery import shared_task
from langchain.text_splitter import RecursiveCharacterTextSplitter

from shared.models.opencopilot_db.pdf_data_sources import (
    insert_pdf_data_source,
    update_pdf_data_source_status,
)
from langchain.document_loaders import UnstructuredMarkdownLoader
from shared.utils.opencopilot_utils import (
    get_embeddings,
    StoreOptions,
    get_file_path,
)
from shared.utils.opencopilot_utils.init_vector_store import init_vector_store
from workers.utils.remove_escape_sequences import remove_escape_sequences


@shared_task
def process_markdown(file_name: str, bot_id: str):
    try:
        insert_pdf_data_source(chatbot_id=bot_id, file_name=file_name, status="PENDING")
        loader = UnstructuredMarkdownLoader(get_file_path(file_name))
        raw_docs = loader.load()

        for doc in raw_docs:
            doc.page_content = remove_escape_sequences(doc.page_content)

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=200, length_function=len
        )
        docs = text_splitter.split_documents(raw_docs)

        embeddings = get_embeddings()
        init_vector_store(
            docs,
            StoreOptions(namespace="knowledgebase", metadata={"bot_id": bot_id}),
        )
        update_pdf_data_source_status(
            chatbot_id=bot_id, file_name=file_name, status="COMPLETED"
        )
    except Exception as e:
        update_pdf_data_source_status(
            chatbot_id=bot_id, file_name=file_name, status="FAILED"
        )
        print(f"Error processing {file_name}:", e)


@shared_task
def retry_failed_markdown_crawl(chatbot_id: str, file_name: str):
    """Re-runs a failed Markdown crawl.

    Args:
        chatbot_id: The ID of the chatbot.
        file_name: The name of the Markdown file to crawl.
    """
    update_pdf_data_source_status(
        chatbot_id=chatbot_id, file_name=file_name, status="PENDING"
    )
    try:
        process_markdown(file_name=file_name, bot_id=chatbot_id)
    except Exception as e:
        update_pdf_data_source_status(
            chatbot_id=chatbot_id, file_name=file_name, status="FAILED"
        )
        print(f"Error reprocessing {file_name}:", e)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((1830, 1925), 'shared.models.opencopilot_db.pdf_data_sources.update_pdf_data_source_status', 'update_pdf_data_source_status', ([], {'chatbot_id': 'chatbot_id', 'file_name': 'file_name', 'status': '"""PENDING"""'}), "(chatbot_id=chatbot_id, file_name=file_name,\n status='PENDING')\n", (1859, 1925), False, 'from shared.models.opencopilot_db.pdf_data_sources import insert_pdf_data_source, update_pdf_data_source_status\n'), ((630, 715), 'shared.models.opencopilot_db.pdf_data_sources.insert_pdf_data_source', 'insert_pdf_data_source', ([], {'chatbot_id': 'bot_id', 'file_name': 'file_name', 'status': '"""PENDING"""'}), "(chatbot_id=bot_id, file_name=file_name, status='PENDING'\n )\n", (652, 715), False, 'from shared.models.opencopilot_db.pdf_data_sources import insert_pdf_data_source, update_pdf_data_source_status\n'), ((941, 1032), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), '(chunk_size=1000, chunk_overlap=200,\n length_function=len)\n', (971, 1032), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1127, 1143), 'shared.utils.opencopilot_utils.get_embeddings', 'get_embeddings', ([], {}), '()\n', (1141, 1143), False, 'from shared.utils.opencopilot_utils import get_embeddings, StoreOptions, get_file_path\n'), ((1290, 1383), 'shared.models.opencopilot_db.pdf_data_sources.update_pdf_data_source_status', 'update_pdf_data_source_status', ([], {'chatbot_id': 'bot_id', 'file_name': 'file_name', 'status': '"""COMPLETED"""'}), "(chatbot_id=bot_id, file_name=file_name,\n status='COMPLETED')\n", (1319, 1383), False, 'from shared.models.opencopilot_db.pdf_data_sources import insert_pdf_data_source, update_pdf_data_source_status\n'), ((755, 779), 'shared.utils.opencopilot_utils.get_file_path', 'get_file_path', (['file_name'], {}), '(file_name)\n', (768, 779), False, 'from shared.utils.opencopilot_utils import get_embeddings, StoreOptions, get_file_path\n'), ((874, 915), 'workers.utils.remove_escape_sequences.remove_escape_sequences', 'remove_escape_sequences', (['doc.page_content'], {}), '(doc.page_content)\n', (897, 915), False, 'from workers.utils.remove_escape_sequences import remove_escape_sequences\n'), ((1201, 1269), 'shared.utils.opencopilot_utils.StoreOptions', 'StoreOptions', ([], {'namespace': '"""knowledgebase"""', 'metadata': "{'bot_id': bot_id}"}), "(namespace='knowledgebase', metadata={'bot_id': bot_id})\n", (1213, 1269), False, 'from shared.utils.opencopilot_utils import get_embeddings, StoreOptions, get_file_path\n'), ((1437, 1527), 'shared.models.opencopilot_db.pdf_data_sources.update_pdf_data_source_status', 'update_pdf_data_source_status', ([], {'chatbot_id': 'bot_id', 'file_name': 'file_name', 'status': '"""FAILED"""'}), "(chatbot_id=bot_id, file_name=file_name,\n status='FAILED')\n", (1466, 1527), False, 'from shared.models.opencopilot_db.pdf_data_sources import insert_pdf_data_source, update_pdf_data_source_status\n'), ((2045, 2139), 'shared.models.opencopilot_db.pdf_data_sources.update_pdf_data_source_status', 'update_pdf_data_source_status', ([], {'chatbot_id': 'chatbot_id', 'file_name': 'file_name', 'status': '"""FAILED"""'}), "(chatbot_id=chatbot_id, file_name=file_name,\n status='FAILED')\n", (2074, 2139), False, 'from shared.models.opencopilot_db.pdf_data_sources import insert_pdf_data_source, update_pdf_data_source_status\n')]
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a simple standalone implementation showing rag pipeline using Nvidia AI Foundational models.
# It uses a simple Streamlit UI and one file implementation of a minimalistic RAG pipeline.

############################################
# Component #1 - Document Loader
############################################

import streamlit as st
import os

st.set_page_config(layout="wide")

with st.sidebar:
    DOCS_DIR = os.path.abspath("./uploaded_docs")
    if not os.path.exists(DOCS_DIR):
        os.makedirs(DOCS_DIR)
    st.subheader("Add to the Knowledge Base")
    with st.form("my-form", clear_on_submit=True):
        uploaded_files = st.file_uploader("Upload a file to the Knowledge Base:", accept_multiple_files=True)
        submitted = st.form_submit_button("Upload!")

    if uploaded_files and submitted:
        for uploaded_file in uploaded_files:
            st.success(f"File {uploaded_file.name} uploaded successfully!")
            with open(os.path.join(DOCS_DIR, uploaded_file.name), "wb") as f:
                f.write(uploaded_file.read())

############################################
# Component #2 - Embedding Model and LLM
############################################

from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings

# make sure to export your NVIDIA AI Playground key as NVIDIA_API_KEY!
llm = ChatNVIDIA(model="mixtral_8x7b")
document_embedder = NVIDIAEmbeddings(model="nvolveqa_40k", model_type="passage")
query_embedder = NVIDIAEmbeddings(model="nvolveqa_40k", model_type="query")

############################################
# Component #3 - Vector Database Store
############################################

from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import DirectoryLoader
from langchain.vectorstores import FAISS
import pickle

with st.sidebar:
    # Option for using an existing vector store
    use_existing_vector_store = st.radio("Use existing vector store if available", ["Yes", "No"], horizontal=True)

# Path to the vector store file
vector_store_path = "vectorstore.pkl"

# Load raw documents from the directory
raw_documents = DirectoryLoader(DOCS_DIR).load()

# Check for existing vector store file
vector_store_exists = os.path.exists(vector_store_path)
vectorstore = None
if use_existing_vector_store == "Yes" and vector_store_exists:
    with open(vector_store_path, "rb") as f:
        vectorstore = pickle.load(f)
    with st.sidebar:
        st.success("Existing vector store loaded successfully.")
else:
    with st.sidebar:
        if raw_documents:
            with st.spinner("Splitting documents into chunks..."):
                text_splitter = CharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
                documents = text_splitter.split_documents(raw_documents)

            with st.spinner("Adding document chunks to vector database..."):
                vectorstore = FAISS.from_documents(documents, document_embedder)

            with st.spinner("Saving vector store"):
                with open(vector_store_path, "wb") as f:
                    pickle.dump(vectorstore, f)
            st.success("Vector store created and saved.")
        else:
            st.warning("No documents available to process!", icon="⚠️")

############################################
# Component #4 - LLM Response Generation and Chat
############################################

st.subheader("Chat with your AI Assistant, Envie!")

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful AI assistant named Envie. You will reply to questions only based on the context that you are provided. If something is out of context, you will refrain from replying and politely decline to respond to the user."), ("user", "{input}")]
)
user_input = st.chat_input("Can you tell me what NVIDIA is known for?")
llm = ChatNVIDIA(model="mixtral_8x7b")

chain = prompt_template | llm | StrOutputParser()

if user_input and vectorstore != None:
    st.session_state.messages.append({"role": "user", "content": user_input})
    retriever = vectorstore.as_retriever()
    docs = retriever.get_relevant_documents(user_input)
    with st.chat_message("user"):
        st.markdown(user_input)

    context = ""
    for doc in docs:
        context += doc.page_content + "\n\n"

    augmented_user_input = "Context: " + context + "\n\nQuestion: " + user_input + "\n"

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        for response in chain.stream({"input": augmented_user_input}):
            full_response += response
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.document_loaders.DirectoryLoader", "langchain_core.output_parsers.StrOutputParser", "langchain.vectorstores.FAISS.from_documents", "langchain_core.prompts.ChatPromptTemplate.from_messages", "langchain_nvidia_ai_endpoints.NVIDIAEmbeddings", "langchain_nvidia_ai_endpoints.ChatNVIDIA" ]
[((1034, 1067), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (1052, 1067), True, 'import streamlit as st\n'), ((2031, 2063), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""mixtral_8x7b"""'}), "(model='mixtral_8x7b')\n", (2041, 2063), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings\n'), ((2084, 2144), 'langchain_nvidia_ai_endpoints.NVIDIAEmbeddings', 'NVIDIAEmbeddings', ([], {'model': '"""nvolveqa_40k"""', 'model_type': '"""passage"""'}), "(model='nvolveqa_40k', model_type='passage')\n", (2100, 2144), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings\n'), ((2162, 2220), 'langchain_nvidia_ai_endpoints.NVIDIAEmbeddings', 'NVIDIAEmbeddings', ([], {'model': '"""nvolveqa_40k"""', 'model_type': '"""query"""'}), "(model='nvolveqa_40k', model_type='query')\n", (2178, 2220), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings\n'), ((2925, 2958), 'os.path.exists', 'os.path.exists', (['vector_store_path'], {}), '(vector_store_path)\n', (2939, 2958), False, 'import os\n'), ((4095, 4146), 'streamlit.subheader', 'st.subheader', (['"""Chat with your AI Assistant, Envie!"""'], {}), "('Chat with your AI Assistant, Envie!')\n", (4107, 4146), True, 'import streamlit as st\n'), ((4480, 4788), 'langchain_core.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system',\n 'You are a helpful AI assistant named Envie. You will reply to questions only based on the context that you are provided. If something is out of context, you will refrain from replying and politely decline to respond to the user.'\n ), ('user', '{input}')]"], {}), "([('system',\n 'You are a helpful AI assistant named Envie. You will reply to questions only based on the context that you are provided. 
If something is out of context, you will refrain from replying and politely decline to respond to the user.'\n ), ('user', '{input}')])\n", (4512, 4788), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((4799, 4857), 'streamlit.chat_input', 'st.chat_input', (['"""Can you tell me what NVIDIA is known for?"""'], {}), "('Can you tell me what NVIDIA is known for?')\n", (4812, 4857), True, 'import streamlit as st\n'), ((4864, 4896), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""mixtral_8x7b"""'}), "(model='mixtral_8x7b')\n", (4874, 4896), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings\n'), ((1103, 1137), 'os.path.abspath', 'os.path.abspath', (['"""./uploaded_docs"""'], {}), "('./uploaded_docs')\n", (1118, 1137), False, 'import os\n'), ((1209, 1250), 'streamlit.subheader', 'st.subheader', (['"""Add to the Knowledge Base"""'], {}), "('Add to the Knowledge Base')\n", (1221, 1250), True, 'import streamlit as st\n'), ((2618, 2704), 'streamlit.radio', 'st.radio', (['"""Use existing vector store if available"""', "['Yes', 'No']"], {'horizontal': '(True)'}), "('Use existing vector store if available', ['Yes', 'No'],\n horizontal=True)\n", (2626, 2704), True, 'import streamlit as st\n'), ((4930, 4947), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (4945, 4947), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((4990, 5063), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': user_input}"], {}), "({'role': 'user', 'content': user_input})\n", (5022, 5063), True, 'import streamlit as st\n'), ((5738, 5823), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (5770, 5823), True, 'import streamlit as st\n'), ((1149, 1173), 'os.path.exists', 'os.path.exists', (['DOCS_DIR'], {}), '(DOCS_DIR)\n', (1163, 1173), False, 'import os\n'), ((1183, 1204), 'os.makedirs', 'os.makedirs', (['DOCS_DIR'], {}), '(DOCS_DIR)\n', (1194, 1204), False, 'import os\n'), ((1260, 1300), 'streamlit.form', 'st.form', (['"""my-form"""'], {'clear_on_submit': '(True)'}), "('my-form', clear_on_submit=True)\n", (1267, 1300), True, 'import streamlit as st\n'), ((1327, 1415), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload a file to the Knowledge Base:"""'], {'accept_multiple_files': '(True)'}), "('Upload a file to the Knowledge Base:',\n accept_multiple_files=True)\n", (1343, 1415), True, 'import streamlit as st\n'), ((1434, 1466), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Upload!"""'], {}), "('Upload!')\n", (1455, 1466), True, 'import streamlit as st\n'), ((2829, 2854), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['DOCS_DIR'], {}), '(DOCS_DIR)\n', (2844, 2854), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((3108, 3122), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3119, 3122), False, 'import pickle\n'), ((3152, 3208), 'streamlit.success', 'st.success', (['"""Existing vector store loaded successfully."""'], {}), "('Existing vector store loaded successfully.')\n", (3162, 3208), True, 'import streamlit as st\n'), ((4274, 4306), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (4289, 4306), True, 'import streamlit as st\n'), ((4316, 4347), 'streamlit.markdown', 'st.markdown', 
(["message['content']"], {}), "(message['content'])\n", (4327, 4347), True, 'import streamlit as st\n'), ((5172, 5195), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5187, 5195), True, 'import streamlit as st\n'), ((5205, 5228), 'streamlit.markdown', 'st.markdown', (['user_input'], {}), '(user_input)\n', (5216, 5228), True, 'import streamlit as st\n'), ((5412, 5440), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5427, 5440), True, 'import streamlit as st\n'), ((5472, 5482), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5480, 5482), True, 'import streamlit as st\n'), ((1562, 1625), 'streamlit.success', 'st.success', (['f"""File {uploaded_file.name} uploaded successfully!"""'], {}), "(f'File {uploaded_file.name} uploaded successfully!')\n", (1572, 1625), True, 'import streamlit as st\n'), ((3821, 3866), 'streamlit.success', 'st.success', (['"""Vector store created and saved."""'], {}), "('Vector store created and saved.')\n", (3831, 3866), True, 'import streamlit as st\n'), ((3893, 3952), 'streamlit.warning', 'st.warning', (['"""No documents available to process!"""'], {'icon': '"""⚠️"""'}), "('No documents available to process!', icon='⚠️')\n", (3903, 3952), True, 'import streamlit as st\n'), ((3279, 3327), 'streamlit.spinner', 'st.spinner', (['"""Splitting documents into chunks..."""'], {}), "('Splitting documents into chunks...')\n", (3289, 3327), True, 'import streamlit as st\n'), ((3361, 3418), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(2000)', 'chunk_overlap': '(200)'}), '(chunk_size=2000, chunk_overlap=200)\n', (3382, 3418), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3510, 3568), 'streamlit.spinner', 'st.spinner', (['"""Adding document chunks to vector database..."""'], {}), "('Adding document chunks to vector database...')\n", (3520, 3568), True, 'import streamlit as st\n'), ((3600, 3650), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['documents', 'document_embedder'], {}), '(documents, document_embedder)\n', (3620, 3650), False, 'from langchain.vectorstores import FAISS\n'), ((3669, 3702), 'streamlit.spinner', 'st.spinner', (['"""Saving vector store"""'], {}), "('Saving vector store')\n", (3679, 3702), True, 'import streamlit as st\n'), ((1648, 1690), 'os.path.join', 'os.path.join', (['DOCS_DIR', 'uploaded_file.name'], {}), '(DOCS_DIR, uploaded_file.name)\n', (1660, 1690), False, 'import os\n'), ((3781, 3808), 'pickle.dump', 'pickle.dump', (['vectorstore', 'f'], {}), '(vectorstore, f)\n', (3792, 3808), False, 'import pickle\n')]
from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import pickle
import os

from env import OPENAI_API_KEY

os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
You can assume the question about the most recent state of the union address.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

template = """You are an AI assistant for answering questions about the most recent state of the union address.
You are given the following extracted parts of a long document and a question. Provide a conversational answer.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
If the question is not about the most recent state of the union, politely inform them that you are tuned to only answer questions about the most recent state of the union.
Lastly, answer the question as if you were a pirate from the south seas and are just coming back from a pirate expedition where you found a treasure chest full of gold doubloons.
Question: {question}
=========
{context}
=========
Answer in Markdown:"""
QA_PROMPT = PromptTemplate(template=template, input_variables=[
    "question", "context"])

pyhealth_template = """
1. role clarification, task definition (high-level instruction), and model encouragement
2. task flow/procedures (input/output clarification)
3. information (chat history, retrieval results: doc + codes) -> separate code doc and txt doc
4. notice (law, regulation, policy, etc.) -> !!!
5. now, give the response.

tricks:
- important info should be at the beginning or in the end. knowledge is in the middle.
- use "sep" to block the prompt
"""


def load_retriever():
    with open("vectorstore.pkl", "rb") as f:
        vectorstore = pickle.load(f)
    retriever = VectorStoreRetriever(vectorstore=vectorstore)
    return retriever


def get_basic_qa_chain():
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)
    retriever = load_retriever()
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True)
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory)
    return model


def get_custom_prompt_qa_chain():
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)
    retriever = load_retriever()
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True)
    # see: https://github.com/langchain-ai/langchain/issues/6635
    # see: https://github.com/langchain-ai/langchain/issues/1497
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": QA_PROMPT})
    return model


def get_condense_prompt_qa_chain():
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)
    retriever = load_retriever()
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True)
    # see: https://github.com/langchain-ai/langchain/issues/5890
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
        combine_docs_chain_kwargs={"prompt": QA_PROMPT})
    return model


def get_qa_with_sources_chain():
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)
    retriever = load_retriever()
    history = []
    model = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        return_source_documents=True)

    def model_func(question):
        # bug: this doesn't work with the built-in memory
        # hacking around it for the tutorial
        # see: https://github.com/langchain-ai/langchain/issues/5630
        new_input = {"question": question['question'],
                     "chat_history": history}
        result = model(new_input)
        history.append((question['question'], result['answer']))
        return result

    return model_func


def get_basic_chain():
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)
    memory = ConversationBufferMemory()
    model = ConversationChain(
        llm=llm,
        memory=memory)
    return model


chain_options = {
    "basic": get_basic_qa_chain,
    "with_sources": get_qa_with_sources_chain,
    "custom_prompt": get_custom_prompt_qa_chain,
    "condense_prompt": get_condense_prompt_qa_chain
}
[ "langchain.chains.ConversationChain", "langchain.prompts.prompt.PromptTemplate", "langchain.vectorstores.base.VectorStoreRetriever", "langchain.chains.ConversationalRetrievalChain.from_llm", "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI", "langchain.prompts.prompt.PromptTemplate.from_template" ]
[((727, 766), 'langchain.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_template'], {}), '(_template)\n', (755, 766), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1521, 1595), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question', 'context']"}), "(template=template, input_variables=['question', 'context'])\n", (1535, 1595), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2225, 2270), 'langchain.vectorstores.base.VectorStoreRetriever', 'VectorStoreRetriever', ([], {'vectorstore': 'vectorstore'}), '(vectorstore=vectorstore)\n', (2245, 2270), False, 'from langchain.vectorstores.base import VectorStoreRetriever\n'), ((2330, 2375), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (2340, 2375), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2422, 2495), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (2446, 2495), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2517, 2604), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'memory': 'memory'}), '(llm=llm, retriever=retriever, memory=\n memory)\n', (2554, 2604), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain\n'), ((2688, 2733), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (2698, 2733), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2780, 2853), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (2804, 2853), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3005, 3141), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'memory': 'memory', 'combine_docs_chain_kwargs': "{'prompt': QA_PROMPT}"}), "(llm=llm, retriever=retriever, memory=\n memory, combine_docs_chain_kwargs={'prompt': QA_PROMPT})\n", (3042, 3141), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain\n'), ((3235, 3280), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (3245, 3280), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3327, 3400), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (3351, 3400), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3487, 3678), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'memory': 'memory', 'condense_question_prompt': 'CONDENSE_QUESTION_PROMPT', 'combine_docs_chain_kwargs': "{'prompt': QA_PROMPT}"}), "(llm=llm, retriever=retriever, memory=\n memory, 
condense_question_prompt=CONDENSE_QUESTION_PROMPT,\n combine_docs_chain_kwargs={'prompt': QA_PROMPT})\n", (3524, 3678), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain\n'), ((3773, 3818), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (3783, 3818), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3881, 3982), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'return_source_documents': '(True)'}), '(llm=llm, retriever=retriever,\n return_source_documents=True)\n', (3918, 3982), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain\n'), ((4466, 4511), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (4476, 4511), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4525, 4551), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (4549, 4551), False, 'from langchain.memory import ConversationBufferMemory\n'), ((4564, 4605), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'memory': 'memory'}), '(llm=llm, memory=memory)\n', (4581, 4605), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain, ConversationChain\n'), ((2194, 2208), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2205, 2208), False, 'import pickle\n')]
# flake8: noqa
from langchain.prompts import PromptTemplate

## Use a shorter template to reduce the number of tokens in the prompt
template = """Create a final answer to the given questions using the provided document excerpts (given in no particular order) as sources. ALWAYS include a "SOURCES" section in your answer citing only the minimal set of sources needed to answer the question. If you are unable to answer the question, simply state that you do not have enough information to answer the question and leave the SOURCES section empty. Use only the provided documents and do not attempt to fabricate an answer.

---------

QUESTION: What is the purpose of ARPA-H?
=========
Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt's based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer's, diabetes, and more.
SOURCES: 1-32
Content: While we're at it, let's make sure every American can get the health care they need. \n\nWe've already made historic investments in health care. \n\nWe've made it easier for Americans to get the care they need, when they need it. \n\nWe've made it easier for Americans to get the treatments they need, when they need them. \n\nWe've made it easier for Americans to get the medications they need, when they need them.
SOURCES: 1-33
Content: The V.A. is pioneering new ways of linking toxic exposures to disease, already helping veterans get the care they deserve. \n\nWe need to extend that same care to all Americans. \n\nThat's why I'm calling on Congress to pass legislation that would establish a national registry of toxic exposures, and provide health care and financial assistance to those affected.
SOURCES: 1-30
=========
FINAL ANSWER: The purpose of ARPA-H is to drive breakthroughs in cancer, Alzheimer's, diabetes, and more.
SOURCES: 1-32

---------

QUESTION: {question}
=========
{summaries}
=========
FINAL ANSWER:"""

STUFF_PROMPT = PromptTemplate(
    template=template, input_variables=["summaries", "question"]
)
[ "langchain.prompts.PromptTemplate" ]
[((2121, 2197), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['summaries', 'question']"}), "(template=template, input_variables=['summaries', 'question'])\n", (2135, 2197), False, 'from langchain.prompts import PromptTemplate\n')]