| code (stringlengths 141 79.4k) | apis (listlengths 1 23) | extract_api (stringlengths 126 73.2k) |
|---|---|---|
| 
	from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel
from hyde.prompts import hyde_prompt
# Example of document loading (from a URL), splitting, and creating a vectorstore
""" 
# Load
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
# Split
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
# Add to vectorDB
vectorstore = Chroma.from_documents(documents=all_splits, 
                                    collection_name="rag-chroma",
                                    embedding=OpenAIEmbeddings(),
                                    )
retriever = vectorstore.as_retriever()
"""
# Embed a single document as a test
vectorstore = Chroma.from_texts(
    ["harrison worked at kensho"],
    collection_name="rag-chroma",
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# LLM
model = ChatOpenAI()
# Query transformation chain
# This transforms the query into the hypothetical document
hyde_chain = hyde_prompt | model | StrOutputParser()
# RAG chain
chain = (
    RunnableParallel(
        {
            # Generate a hypothetical document and then pass it to the retriever
            "context": hyde_chain | retriever,
            "question": lambda x: x["question"],
        }
    )
    | prompt
    | model
    | StrOutputParser()
)
# Add input types for playground
class ChainInput(BaseModel):
    question: str
chain = chain.with_types(input_type=ChainInput)
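# Hypothetical usage: chain.invoke({"question": "where did harrison work"}) first generates a
# hypothetical answer document via hyde_chain, retrieves against it, then answers from that context.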
 | 
	[
  "langchain_community.chat_models.ChatOpenAI",
  "langchain_core.prompts.ChatPromptTemplate.from_template",
  "langchain_core.output_parsers.StrOutputParser",
  "langchain_community.embeddings.OpenAIEmbeddings",
  "langchain_core.runnables.RunnableParallel"
] | 
	[((1516, 1558), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['template'], {}), '(template)\n', (1548, 1558), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1574, 1586), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1584, 1586), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1711, 1728), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1726, 1728), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((2008, 2025), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2023, 2025), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1325, 1343), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1341, 1343), False, 'from langchain_community.embeddings import OpenAIEmbeddings\n'), ((1756, 1850), 'langchain_core.runnables.RunnableParallel', 'RunnableParallel', (["{'context': hyde_chain | retriever, 'question': lambda x: x['question']}"], {}), "({'context': hyde_chain | retriever, 'question': lambda x:\n    x['question']})\n", (1772, 1850), False, 'from langchain_core.runnables import RunnableParallel\n')] | 
| 
	"""
AI Module
This module provides an AI class that interfaces with language models to perform various tasks such as
starting a conversation, advancing the conversation, and handling message serialization. It also includes
backoff strategies for handling rate limit errors from the OpenAI API.
Classes:
    AI: A class that interfaces with language models for conversation management and message serialization.
Functions:
    serialize_messages(messages: List[Message]) -> str
        Serialize a list of messages to a JSON string.
"""
from __future__ import annotations
import json
import logging
import os
from pathlib import Path
from typing import List, Optional, Union
import backoff
import openai
import pyperclip
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    messages_from_dict,
    messages_to_dict,
)
from langchain_anthropic import ChatAnthropic
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from gpt_engineer.core.token_usage import TokenUsageLog
# Type hint for a chat message
Message = Union[AIMessage, HumanMessage, SystemMessage]
# Set up logging
logger = logging.getLogger(__name__)
class AI:
    """
    A class that interfaces with language models for conversation management and message serialization.
    This class provides methods to start and advance conversations, handle message serialization,
    and implement backoff strategies for rate limit errors when interacting with the OpenAI API.
    Attributes
    ----------
    temperature : float
        The temperature setting for the language model.
    azure_endpoint : str
        The endpoint URL for the Azure-hosted language model.
    model_name : str
        The name of the language model to use.
    streaming : bool
        A flag indicating whether to use streaming for the language model.
    llm : BaseChatModel
        The language model instance for conversation management.
    token_usage_log : TokenUsageLog
        A log for tracking token usage during conversations.
    Methods
    -------
    start(system: str, user: str, step_name: str) -> List[Message]
        Start the conversation with a system message and a user message.
    next(messages: List[Message], prompt: Optional[str], step_name: str) -> List[Message]
        Advance the conversation by sending the message history to the LLM and updating it with the response.
    backoff_inference(messages: List[Message]) -> Any
        Perform inference using the language model with an exponential backoff strategy.
    serialize_messages(messages: List[Message]) -> str
        Serialize a list of messages to a JSON string.
    deserialize_messages(jsondictstr: str) -> List[Message]
        Deserialize a JSON string to a list of messages.
    _create_chat_model() -> BaseChatModel
        Create a chat model with the specified model name and temperature.
    """
    def __init__(
        self,
        model_name="gpt-4-1106-preview",
        temperature=0.1,
        azure_endpoint="",
        streaming=True,
    ):
        """
        Initialize the AI class.
        Parameters
        ----------
        model_name : str, optional
            The name of the model to use, by default "gpt-4-1106-preview".
        temperature : float, optional
            The temperature to use for the model, by default 0.1.
        """
        self.temperature = temperature
        self.azure_endpoint = azure_endpoint
        self.model_name = model_name
        self.streaming = streaming
        self.llm = self._create_chat_model()
        self.token_usage_log = TokenUsageLog(model_name)
        logger.debug(f"Using model {self.model_name}")
    def start(self, system: str, user: str, step_name: str) -> List[Message]:
        """
        Start the conversation with a system message and a user message.
        Parameters
        ----------
        system : str
            The content of the system message.
        user : str
            The content of the user message.
        step_name : str
            The name of the step.
        Returns
        -------
        List[Message]
            The list of messages in the conversation.
        """
        messages: List[Message] = [
            SystemMessage(content=system),
            HumanMessage(content=user),
        ]
        return self.next(messages, step_name=step_name)
    def next(
        self,
        messages: List[Message],
        prompt: Optional[str] = None,
        *,
        step_name: str,
    ) -> List[Message]:
        """
        Advance the conversation by sending the message history
        to the LLM and updating it with the response.
        Parameters
        ----------
        messages : List[Message]
            The list of messages in the conversation.
        prompt : Optional[str], optional
            The prompt to use, by default None.
        step_name : str
            The name of the step.
        Returns
        -------
        List[Message]
            The updated list of messages in the conversation.
        """
        if prompt:
            messages.append(HumanMessage(content=prompt))
        logger.debug(f"Creating a new chat completion: {messages}")
        messages = self._collapse_messages(messages)
        response = self.backoff_inference(messages)
        self.token_usage_log.update_log(
            messages=messages, answer=response.content, step_name=step_name
        )
        messages.append(response)
        logger.debug(f"Chat completion finished: {messages}")
        return messages
    def _collapse_messages(self, messages: List[Message]):
        """
        Combine consecutive messages of the same type into a single message.
        This method iterates through the list of messages, combining consecutive messages of the same type
        by joining their content with a newline character. This reduces the number of messages and simplifies
        the conversation for processing.
        Parameters
        ----------
        messages : List[Message]
            The list of messages to collapse.
        Returns
        -------
        List[Message]
            The list of messages after collapsing consecutive messages of the same type.
        """
        collapsed_messages = []
        if not messages:
            return collapsed_messages
        previous_message = messages[0]
        combined_content = previous_message.content
        for current_message in messages[1:]:
            if current_message.type == previous_message.type:
                combined_content += "\n\n" + current_message.content
            else:
                collapsed_messages.append(
                    previous_message.__class__(content=combined_content)
                )
                previous_message = current_message
                combined_content = current_message.content
        collapsed_messages.append(previous_message.__class__(content=combined_content))
        return collapsed_messages
    @backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
    def backoff_inference(self, messages):
        """
        Perform inference using the language model while implementing an exponential backoff strategy.
        This function will retry the inference in case of a rate limit error from the OpenAI API.
        It uses an exponential backoff strategy, meaning the wait time between retries increases
        exponentially. The function will attempt to retry up to 7 times within a span of 45 seconds.
        Parameters
        ----------
        messages : List[Message]
            A list of chat messages which will be passed to the language model for processing.
        Returns
        -------
        Any
            The output from the language model after processing the provided messages.
        Raises
        ------
        openai.RateLimitError
            If the number of retries exceeds the maximum or if the rate limit persists beyond the
            allotted time, the function will ultimately raise a RateLimitError.
        Example
        -------
        >>> messages = [SystemMessage(content="Hello"), HumanMessage(content="How's the weather?")]
        >>> response = backoff_inference(messages)
        """
        return self.llm.invoke(messages)  # type: ignore
    @staticmethod
    def serialize_messages(messages: List[Message]) -> str:
        """
        Serialize a list of messages to a JSON string.
        Parameters
        ----------
        messages : List[Message]
            The list of messages to serialize.
        Returns
        -------
        str
            The serialized messages as a JSON string.
        """
        return json.dumps(messages_to_dict(messages))
    @staticmethod
    def deserialize_messages(jsondictstr: str) -> List[Message]:
        """
        Deserialize a JSON string to a list of messages.
        Parameters
        ----------
        jsondictstr : str
            The JSON string to deserialize.
        Returns
        -------
        List[Message]
            The deserialized list of messages.
        """
        data = json.loads(jsondictstr)
        # Modify implicit is_chunk property to ALWAYS false
        # since Langchain's Message schema is stricter
        prevalidated_data = [
            {**item, "tools": {**item.get("tools", {}), "is_chunk": False}}
            for item in data
        ]
        return list(messages_from_dict(prevalidated_data))  # type: ignore
    def _create_chat_model(self) -> BaseChatModel:
        """
        Create a chat model with the specified model name and temperature.
        Parameters
        ----------
        model : str
            The name of the model to create.
        temperature : float
            The temperature to use for the model.
        Returns
        -------
        BaseChatModel
            The created chat model.
        """
        if self.azure_endpoint:
            return AzureChatOpenAI(
                azure_endpoint=self.azure_endpoint,
                openai_api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"),
                deployment_name=self.model_name,
                openai_api_type="azure",
                streaming=self.streaming,
                callbacks=[StreamingStdOutCallbackHandler()],
            )
        if "claude" in self.model_name:
            return ChatAnthropic(
                model=self.model_name,
                temperature=self.temperature,
                callbacks=[StreamingStdOutCallbackHandler()],
                max_tokens_to_sample=4096,
            )
        return ChatOpenAI(
            model=self.model_name,
            temperature=self.temperature,
            streaming=self.streaming,
            callbacks=[StreamingStdOutCallbackHandler()],
        )
def serialize_messages(messages: List[Message]) -> str:
    return AI.serialize_messages(messages)
class ClipboardAI(AI):
    # Intentionally does not call the superclass __init__
    def __init__(self, **_):  # type: ignore
        pass
    @staticmethod
    def serialize_messages(messages: List[Message]) -> str:
        return "\n\n".join([f"{m.type}:\n{m.content}" for m in messages])
    @staticmethod
    def multiline_input():
        print("Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.")
        content = []
        while True:
            try:
                line = input()
            except EOFError:
                break
            content.append(line)
        return "\n".join(content)
    def next(
        self,
        messages: List[Message],
        prompt: Optional[str] = None,
        *,
        step_name: str,
    ) -> List[Message]:
        """
        Not yet fully supported
        """
        if prompt:
            messages.append(HumanMessage(content=prompt))
        logger.debug(f"Creating a new chat completion: {messages}")
        msgs = self.serialize_messages(messages)
        pyperclip.copy(msgs)
        Path("clipboard.txt").write_text(msgs)
        print(
            "Messages copied to clipboard and written to clipboard.txt,",
            len(msgs),
            "characters in total",
        )
        response = self.multiline_input()
        messages.append(AIMessage(content=response))
        logger.debug(f"Chat completion finished: {messages}")
        return messages
 | 
	[
  "langchain.schema.messages_to_dict",
  "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
  "langchain.schema.messages_from_dict",
  "langchain.schema.HumanMessage",
  "langchain.schema.AIMessage",
  "langchain.schema.SystemMessage"
] | 
	[((1266, 1293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1283, 1293), False, 'import logging\n'), ((7101, 7188), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'openai.RateLimitError'], {'max_tries': '(7)', 'max_time': '(45)'}), '(backoff.expo, openai.RateLimitError, max_tries=7,\n    max_time=45)\n', (7121, 7188), False, 'import backoff\n'), ((3698, 3723), 'gpt_engineer.core.token_usage.TokenUsageLog', 'TokenUsageLog', (['model_name'], {}), '(model_name)\n', (3711, 3723), False, 'from gpt_engineer.core.token_usage import TokenUsageLog\n'), ((9467, 9490), 'json.loads', 'json.loads', (['jsondictstr'], {}), '(jsondictstr)\n', (9477, 9490), False, 'import json\n'), ((12276, 12296), 'pyperclip.copy', 'pyperclip.copy', (['msgs'], {}), '(msgs)\n', (12290, 12296), False, 'import pyperclip\n'), ((4343, 4372), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system'}), '(content=system)\n', (4356, 4372), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((4386, 4412), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user'}), '(content=user)\n', (4398, 4412), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((9048, 9074), 'langchain.schema.messages_to_dict', 'messages_to_dict', (['messages'], {}), '(messages)\n', (9064, 9074), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((9771, 9808), 'langchain.schema.messages_from_dict', 'messages_from_dict', (['prevalidated_data'], {}), '(prevalidated_data)\n', (9789, 9808), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12569, 12596), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response'}), '(content=response)\n', (12578, 12596), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((5209, 5237), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (5221, 5237), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12119, 12147), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (12131, 12147), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12305, 12326), 'pathlib.Path', 'Path', (['"""clipboard.txt"""'], {}), "('clipboard.txt')\n", (12309, 12326), False, 'from pathlib import Path\n'), ((10405, 10450), 'os.getenv', 'os.getenv', (['"""OPENAI_API_VERSION"""', '"""2023-05-15"""'], {}), "('OPENAI_API_VERSION', '2023-05-15')\n", (10414, 10450), False, 'import os\n'), ((11105, 11137), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (11135, 11137), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((10611, 10643), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (10641, 10643), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((10847, 10879), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', 
([], {}), '()\n', (10877, 10879), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] | 
| 
	import os
import csv
from datetime import datetime
from constants import EMBEDDING_MODEL_NAME
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
def log_to_csv(question, answer):
    log_dir, log_file = "local_chat_history", "qa_log.csv"
    # Ensure log directory exists, create if not
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # Construct the full file path
    log_path = os.path.join(log_dir, log_file)
    # Check if file exists, if not create and write headers
    if not os.path.isfile(log_path):
        with open(log_path, mode="w", newline="", encoding="utf-8") as file:
            writer = csv.writer(file)
            writer.writerow(["timestamp", "question", "answer"])
    # Append the log entry
    with open(log_path, mode="a", newline="", encoding="utf-8") as file:
        writer = csv.writer(file)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        writer.writerow([timestamp, question, answer])
def get_embeddings(device_type="cuda"):
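    # Pick the embedding wrapper that matches the configured model family (instructor, bge, or generic HF).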
    if "instructor" in EMBEDDING_MODEL_NAME:
        return HuggingFaceInstructEmbeddings(
            model_name=EMBEDDING_MODEL_NAME,
            model_kwargs={"device": device_type},
            embed_instruction="Represent the document for retrieval:",
            query_instruction="Represent the question for retrieving supporting documents:",
        )
    elif "bge" in EMBEDDING_MODEL_NAME:
        return HuggingFaceBgeEmbeddings(
            model_name=EMBEDDING_MODEL_NAME,
            model_kwargs={"device": device_type},
            query_instruction="Represent this sentence for searching relevant passages:",
        )
    else:
        return HuggingFaceEmbeddings(
            model_name=EMBEDDING_MODEL_NAME,
            model_kwargs={"device": device_type},
        )
 | 
	[
  "langchain.embeddings.HuggingFaceInstructEmbeddings",
  "langchain.embeddings.HuggingFaceEmbeddings",
  "langchain.embeddings.HuggingFaceBgeEmbeddings"
] | 
	[((531, 562), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (543, 562), False, 'import os\n'), ((426, 449), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (440, 449), False, 'import os\n'), ((459, 479), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (470, 479), False, 'import os\n'), ((635, 659), 'os.path.isfile', 'os.path.isfile', (['log_path'], {}), '(log_path)\n', (649, 659), False, 'import os\n'), ((959, 975), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (969, 975), False, 'import csv\n'), ((1198, 1453), 'langchain.embeddings.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL_NAME', 'model_kwargs': "{'device': device_type}", 'embed_instruction': '"""Represent the document for retrieval:"""', 'query_instruction': '"""Represent the question for retrieving supporting documents:"""'}), "(model_name=EMBEDDING_MODEL_NAME, model_kwargs\n    ={'device': device_type}, embed_instruction=\n    'Represent the document for retrieval:', query_instruction=\n    'Represent the question for retrieving supporting documents:')\n", (1227, 1453), False, 'from langchain.embeddings import HuggingFaceInstructEmbeddings\n'), ((759, 775), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (769, 775), False, 'import csv\n'), ((1554, 1737), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL_NAME', 'model_kwargs': "{'device': device_type}", 'query_instruction': '"""Represent this sentence for searching relevant passages:"""'}), "(model_name=EMBEDDING_MODEL_NAME, model_kwargs={\n    'device': device_type}, query_instruction=\n    'Represent this sentence for searching relevant passages:')\n", (1578, 1737), False, 'from langchain.embeddings import HuggingFaceBgeEmbeddings\n'), ((1801, 1898), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL_NAME', 'model_kwargs': "{'device': device_type}"}), "(model_name=EMBEDDING_MODEL_NAME, model_kwargs={\n    'device': device_type})\n", (1822, 1898), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((996, 1010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1008, 1010), False, 'from datetime import datetime\n')] | 
| 
	from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import asyncio
from langchain.prompts import PromptTemplate
from server.utils import get_prompt_template
async def completion(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                     stream: bool = Body(False, description="流式输出"),
                     echo: bool = Body(False, description="除了输出之外,还回显输入"),
                     model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
                     max_tokens: Optional[int] = Body(1024, description="限制LLM生成Token数量,默认None代表模型最大值"),
                     # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
                     prompt_name: str = Body("default",
                                             description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
                     ):
    # TODO: ApiModelWorker treats requests as chat by default and parses params["prompt"] into messages,
    # so ApiModelWorker needs corresponding handling when used here.
    async def completion_iterator(query: str,
                                  model_name: str = LLM_MODELS[0],
                                  prompt_name: str = prompt_name,
                                  echo: bool = echo,
                                  ) -> AsyncIterable[str]:
        nonlocal max_tokens
        callback = AsyncIteratorCallbackHandler()
        if isinstance(max_tokens, int) and max_tokens <= 0:
            max_tokens = None
        model = get_OpenAI(
            model_name=model_name,
            temperature=temperature,
            max_tokens=max_tokens,
            callbacks=[callback],
            echo=echo
        )
        prompt_template = get_prompt_template("completion", prompt_name)
        prompt = PromptTemplate.from_template(prompt_template)
        chain = LLMChain(prompt=prompt, llm=model)
        # Begin a task that runs in the background.
        task = asyncio.create_task(wrap_done(
            chain.acall({"input": query}),
            callback.done),
        )
        if stream:
            async for token in callback.aiter():
                # Use server-sent-events to stream the response
                yield token
        else:
            answer = ""
            async for token in callback.aiter():
                answer += token
            yield answer
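        # Wait for the wrapped chain call to finish before the generator exits.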
        await task
    return EventSourceResponse(completion_iterator(query=query,
                                                 model_name=model_name,
                                                 prompt_name=prompt_name),
                             )
 | 
	[
  "langchain.chains.LLMChain",
  "langchain.prompts.PromptTemplate.from_template",
  "langchain.callbacks.AsyncIteratorCallbackHandler"
] | 
	[((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", (540, 567), False, 'from fastapi import Body\n'), ((603, 642), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""除了输出之外,还回显输入"""'}), "(False, description='除了输出之外,还回显输入')\n", (607, 642), False, 'from fastapi import Body\n'), ((683, 727), 'fastapi.Body', 'Body', (['LLM_MODELS[0]'], {'description': '"""LLM 模型名称。"""'}), "(LLM_MODELS[0], description='LLM 模型名称。')\n", (687, 727), False, 'from fastapi import Body\n'), ((771, 828), 'fastapi.Body', 'Body', (['TEMPERATURE'], {'description': '"""LLM 采样温度"""', 'ge': '(0.0)', 'le': '(1.0)'}), "(TEMPERATURE, description='LLM 采样温度', ge=0.0, le=1.0)\n", (775, 828), False, 'from fastapi import Body\n'), ((879, 933), 'fastapi.Body', 'Body', (['(1024)'], {'description': '"""限制LLM生成Token数量,默认None代表模型最大值"""'}), "(1024, description='限制LLM生成Token数量,默认None代表模型最大值')\n", (883, 933), False, 'from fastapi import Body\n'), ((1083, 1157), 'fastapi.Body', 'Body', (['"""default"""'], {'description': '"""使用的prompt模板名称(在configs/prompt_config.py中配置)"""'}), "('default', description='使用的prompt模板名称(在configs/prompt_config.py中配置)')\n", (1087, 1157), False, 'from fastapi import Body\n'), ((1664, 1694), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (1692, 1694), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((1802, 1921), 'server.utils.get_OpenAI', 'get_OpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'callbacks': '[callback]', 'echo': 'echo'}), '(model_name=model_name, temperature=temperature, max_tokens=\n    max_tokens, callbacks=[callback], echo=echo)\n', (1812, 1921), False, 'from server.utils import wrap_done, get_OpenAI\n'), ((2014, 2060), 'server.utils.get_prompt_template', 'get_prompt_template', (['"""completion"""', 'prompt_name'], {}), "('completion', prompt_name)\n", (2033, 2060), False, 'from server.utils import get_prompt_template\n'), ((2078, 2123), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2106, 2123), False, 'from langchain.prompts import PromptTemplate\n'), ((2140, 2174), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'model'}), '(prompt=prompt, llm=model)\n', (2148, 2174), False, 'from langchain.chains import LLMChain\n')] | 
| 
# -*- coding: utf-8 -*-
import openai
import json
import logging
import sys
import argparse
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate
)
from langchain import LLMChain
import numpy as np
import requests
import os
import subprocess
import re
import importlib.util
from sklearn.metrics.pairwise import cosine_similarity
import pickle
from util import *
from tqdm import tqdm
openai.api_key = os.environ["OPENAI_API_KEY"]
def get_last_processed_index(progress_file):
    """Retrieve the last processed index from the progress file."""
    if os.path.exists(progress_file):
        with open(progress_file, 'r', encoding='utf-8') as f:
            last_index = f.read().strip()
            return int(last_index) if last_index else 0
    else:
        return 0
def update_progress(progress_file, index):
    """Update the last processed index in the progress file."""
    with open(progress_file, 'w', encoding='utf-8') as f:
        f.write(str(index))
def task_decompose(question, Tool_dic, model_name):
    chat = ChatOpenAI(model_name=model_name)
    template = "You are a helpful assistant."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
    human_message_prompt = HumanMessagePromptTemplate.from_template(
        "We have spotify database and the following tools:\n"
        "{Tool_dic}"
        "You need to decompose a complex user's question into some simple subtasks and let the model execute it step by step with these tools.\n"
        "Please note that: \n"
        "1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n"
        "2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n"
        "3. If you think you do not need to use the tool to solve the subtask, just leave it as {{\"ID\": -1}}\n"
        "4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path."
        "5. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two examples output look like:\n"
        "'''\n"
        "Question: Pause the player"
        "Example 1: [{{\"Task\":\"Get information about the user’s current playback state\", \"ID\":15}}, {{\"Task\":\"Pause playback on the user's account\", \"ID\":19}}]\n"
        "'''\n"
        "This is the user's question: {question}\n"
        "Output:"
    )
    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
    chain = LLMChain(llm=chat, prompt=chat_prompt)
    ind = 0
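    # Retry the decompose call, since the model output may fail to parse; give up (return -1) after repeated failures.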
    while True:
        try:
            result = chain.run(question=question, Tool_dic=Tool_dic)
            result = eval(result.split('\n\n')[0])
            break
        except Exception as e:
            print(f"task decompose fails: {e}")
            if ind > 10:
                return -1
            ind += 1
            continue
    return result
def task_execution(
        Tool_dic, dic_tool, test_data, progress_file,
        start_index, total_files, retrieval_num, ind, model_name):
    with tqdm(total=total_files, desc="Processing files", initial=start_index) as pbar:
        for i, data in enumerate(test_data[start_index:], start=start_index):
            question = data["query"]
            print(question)
            task_path = task_decompose(question, Tool_dic, model_name)
            tool_choice_ls = []
            for task in task_path:
                if isinstance(task["ID"], list):
                    for ele in task["ID"]:
                        tool_choice_ls.append(dic_tool[ele]['tool_usage'])
                elif int(task["ID"]) in dic_tool.keys():
                    tool_choice_ls.append(dic_tool[task["ID"]]['tool_usage'])
            ind = ind + 1
            with open(f"restbench_{model_name}_Easytool.jsonl", 'a+', encoding='utf-8') as f:
                line = json.dumps({
                    "ID": ind,
                    "question": question,
                    "task_path": task_path,
                    "tool_choice_ls": tool_choice_ls
                }, ensure_ascii=False)
                f.write(line + '\n')
            print(tool_choice_ls)
            update_progress(progress_file, i + 1)
            pbar.update(1)
 | 
	[
  "langchain.LLMChain",
  "langchain.prompts.HumanMessagePromptTemplate.from_template",
  "langchain.chat_models.ChatOpenAI",
  "langchain.prompts.ChatPromptTemplate.from_messages",
  "langchain.prompts.SystemMessagePromptTemplate.from_template"
] | 
	[((717, 746), 'os.path.exists', 'os.path.exists', (['progress_file'], {}), '(progress_file)\n', (731, 746), False, 'import os\n'), ((1210, 1243), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1220, 1243), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1320, 1371), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (1361, 1371), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1400, 2422), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""We have spotify database and the following tools:\n{Tool_dic}You need to decompose a complex user\'s question into some simple subtasks and let the model execute it step by step with these tools.\nPlease note that: \n1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n3. If you think you do not need to use the tool to solve the subtask, just leave it as {{"ID": -1}}\n4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path.5. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two examples output look like:\n\'\'\'\nQuestion: Pause the playerExample 1: [{{"Task":"Get information about the user’s current playback state", "ID":15}}, {{"Task":"Pause playback on the user\'s account", "ID":19}}]\n\'\'\'\nThis is the user\'s question: {question}\nOutput:"""'], {}), '(\n    """We have spotify database and the following tools:\n{Tool_dic}You need to decompose a complex user\'s question into some simple subtasks and let the model execute it step by step with these tools.\nPlease note that: \n1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n3. If you think you do not need to use the tool to solve the subtask, just leave it as {{"ID": -1}}\n4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path.5. You must ONLY output the ID of the tool you chose in a parsible JSON format. 
Two examples output look like:\n\'\'\'\nQuestion: Pause the playerExample 1: [{{"Task":"Get information about the user’s current playback state", "ID":15}}, {{"Task":"Pause playback on the user\'s account", "ID":19}}]\n\'\'\'\nThis is the user\'s question: {question}\nOutput:"""\n    )\n', (1440, 2422), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2637, 2716), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (2669, 2716), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2730, 2768), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'chat_prompt'}), '(llm=chat, prompt=chat_prompt)\n', (2738, 2768), False, 'from langchain import LLMChain\n'), ((3309, 3378), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_files', 'desc': '"""Processing files"""', 'initial': 'start_index'}), "(total=total_files, desc='Processing files', initial=start_index)\n", (3313, 3378), False, 'from tqdm import tqdm\n'), ((4128, 4255), 'json.dumps', 'json.dumps', (["{'ID': ind, 'question': question, 'task_path': task_path, 'tool_choice_ls':\n    tool_choice_ls}"], {'ensure_ascii': '(False)'}), "({'ID': ind, 'question': question, 'task_path': task_path,\n    'tool_choice_ls': tool_choice_ls}, ensure_ascii=False)\n", (4138, 4255), False, 'import json\n')] | 
| 
	from langchain.llms import Ollama
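# Assumes a local Ollama server is running and the "llama2" model has been pulled.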
question = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(question)
print(res)
 | 
	[
  "langchain.llms.Ollama"
] | 
	[((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] | 
| 
	import os
from pathlib import Path
from typing import Union
import cloudpickle
import yaml
from mlflow.exceptions import MlflowException
from mlflow.langchain.utils import (
    _BASE_LOAD_KEY,
    _CONFIG_LOAD_KEY,
    _MODEL_DATA_FOLDER_NAME,
    _MODEL_DATA_KEY,
    _MODEL_DATA_PKL_FILE_NAME,
    _MODEL_DATA_YAML_FILE_NAME,
    _MODEL_LOAD_KEY,
    _MODEL_TYPE_KEY,
    _RUNNABLE_LOAD_KEY,
    _UNSUPPORTED_MODEL_ERROR_MESSAGE,
    _load_base_lcs,
    _load_from_json,
    _load_from_pickle,
    _load_from_yaml,
    _save_base_lcs,
    _validate_and_wrap_lc_model,
    base_lc_types,
    custom_type_to_loader_dict,
    lc_runnable_assign_types,
    lc_runnable_branch_types,
    lc_runnable_with_steps_types,
    lc_runnables_types,
    picklable_runnable_types,
)
_STEPS_FOLDER_NAME = "steps"
_RUNNABLE_STEPS_FILE_NAME = "steps.yaml"
_BRANCHES_FOLDER_NAME = "branches"
_MAPPER_FOLDER_NAME = "mapper"
_RUNNABLE_BRANCHES_FILE_NAME = "branches.yaml"
_DEFAULT_BRANCH_NAME = "default"
def _load_model_from_config(path, model_config):
    from langchain.chains.loading import type_to_loader_dict as chains_type_to_loader_dict
    from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict
    try:
        from langchain.prompts.loading import type_to_loader_dict as prompts_types
    except ImportError:
        prompts_types = {"prompt", "few_shot_prompt"}
    config_path = os.path.join(path, model_config.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME))
    # Load runnables from config file
    if config_path.endswith(".yaml"):
        config = _load_from_yaml(config_path)
    elif config_path.endswith(".json"):
        config = _load_from_json(config_path)
    else:
        raise MlflowException(
            f"Cannot load runnable without a config file. Got path {config_path}."
        )
    _type = config.get("_type")
    if _type in chains_type_to_loader_dict:
        from langchain.chains.loading import load_chain
        return load_chain(config_path)
    elif _type in prompts_types:
        from langchain.prompts.loading import load_prompt
        return load_prompt(config_path)
    elif _type in llms_get_type_to_cls_dict():
        from langchain.llms.loading import load_llm
        return load_llm(config_path)
    elif _type in custom_type_to_loader_dict():
        return custom_type_to_loader_dict()[_type](config)
    raise MlflowException(f"Unsupported type {_type} for loading.")
def _load_model_from_path(path: str, model_config=None):
    model_load_fn = model_config.get(_MODEL_LOAD_KEY)
    if model_load_fn == _RUNNABLE_LOAD_KEY:
        return _load_runnables(path, model_config)
    if model_load_fn == _BASE_LOAD_KEY:
        return _load_base_lcs(path, model_config)
    if model_load_fn == _CONFIG_LOAD_KEY:
        return _load_model_from_config(path, model_config)
    raise MlflowException(f"Unsupported model load key {model_load_fn}")
def _load_runnable_with_steps(file_path: Union[Path, str], model_type: str):
    """Load the model
    Args:
        file_path: Path to file to load the model from.
        model_type: Type of the model to load.
    """
    from langchain.schema.runnable import RunnableParallel, RunnableSequence
    # Convert file to Path object.
    load_path = Path(file_path)
    if not load_path.exists() or not load_path.is_dir():
        raise MlflowException(
            f"File {load_path} must exist and must be a directory "
            "in order to load runnable with steps."
        )
    steps_conf_file = load_path / _RUNNABLE_STEPS_FILE_NAME
    if not steps_conf_file.exists():
        raise MlflowException(
            f"File {steps_conf_file} must exist in order to load runnable with steps."
        )
    steps_conf = _load_from_yaml(steps_conf_file)
    steps_path = load_path / _STEPS_FOLDER_NAME
    if not steps_path.exists() or not steps_path.is_dir():
        raise MlflowException(
            f"Folder {steps_path} must exist and must be a directory "
            "in order to load runnable with steps."
        )
    steps = {}
    # ignore hidden files
    for step in (f for f in os.listdir(steps_path) if not f.startswith(".")):
        config = steps_conf.get(step)
        # load model from the folder of the step
        runnable = _load_model_from_path(os.path.join(steps_path, step), config)
        steps[step] = runnable
    if model_type == RunnableSequence.__name__:
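        # Sequence steps were saved under numeric folder names; restore them in index order.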
        steps = [value for _, value in sorted(steps.items(), key=lambda item: int(item[0]))]
        return runnable_sequence_from_steps(steps)
    if model_type == RunnableParallel.__name__:
        return RunnableParallel(steps)
def runnable_sequence_from_steps(steps):
    """Construct a RunnableSequence from steps.
    Args:
        steps: List of steps to construct the RunnableSequence from.
    """
    from langchain.schema.runnable import RunnableSequence
    if len(steps) < 2:
        raise ValueError(f"RunnableSequence must have at least 2 steps, got {len(steps)}.")
    first, *middle, last = steps
    return RunnableSequence(first=first, middle=middle, last=last)
def _load_runnable_branch(file_path: Union[Path, str]):
    """Load the model
    Args:
        file_path: Path to file to load the model from.
    """
    from langchain.schema.runnable import RunnableBranch
    # Convert file to Path object.
    load_path = Path(file_path)
    if not load_path.exists() or not load_path.is_dir():
        raise MlflowException(
            f"File {load_path} must exist and must be a directory "
            "in order to load runnable with steps."
        )
    branches_conf_file = load_path / _RUNNABLE_BRANCHES_FILE_NAME
    if not branches_conf_file.exists():
        raise MlflowException(
            f"File {branches_conf_file} must exist in order to load runnable with steps."
        )
    branches_conf = _load_from_yaml(branches_conf_file)
    branches_path = load_path / _BRANCHES_FOLDER_NAME
    if not branches_path.exists() or not branches_path.is_dir():
        raise MlflowException(
            f"Folder {branches_path} must exist and must be a directory "
            "in order to load runnable with steps."
        )
    branches = []
    for branch in os.listdir(branches_path):
        # load model from the folder of the branch
        if branch == _DEFAULT_BRANCH_NAME:
            default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
            default = _load_model_from_path(
                default_branch_path, branches_conf.get(_DEFAULT_BRANCH_NAME)
            )
        else:
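            # Each non-default branch was saved as a (condition, runnable) pair under subfolders 0 and 1.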
            branch_tuple = []
            for i in range(2):
                config = branches_conf.get(f"{branch}-{i}")
                runnable = _load_model_from_path(
                    os.path.join(branches_path, branch, str(i)), config
                )
                branch_tuple.append(runnable)
            branches.append(tuple(branch_tuple))
    # default branch must be the last branch
    branches.append(default)
    return RunnableBranch(*branches)
def _load_runnable_assign(file_path: Union[Path, str]):
    """Load the model
    Args:
        file_path: Path to file to load the model from.
    """
    from langchain.schema.runnable.passthrough import RunnableAssign
    # Convert file to Path object.
    load_path = Path(file_path)
    if not load_path.exists() or not load_path.is_dir():
        raise MlflowException(
            f"File {load_path} must exist and must be a directory in order to load runnable."
        )
    mapper_file = load_path / _MAPPER_FOLDER_NAME
    if not mapper_file.exists() or not mapper_file.is_dir():
        raise MlflowException(
            f"Folder {mapper_file} must exist and must be a directory "
            "in order to load runnable assign with mapper."
        )
    mapper = _load_runnable_with_steps(mapper_file, "RunnableParallel")
    return RunnableAssign(mapper)
def _save_internal_runnables(runnable, path, loader_fn, persist_dir):
    conf = {}
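    # Dispatch on the runnable's type: LangChain runnable types recurse into _save_runnables,
    # base LC types go through _save_base_lcs, anything else is saved via its own save()/dict().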
    if isinstance(runnable, lc_runnables_types()):
        conf[_MODEL_TYPE_KEY] = runnable.__class__.__name__
        conf.update(_save_runnables(runnable, path, loader_fn, persist_dir))
    elif isinstance(runnable, base_lc_types()):
        lc_model = _validate_and_wrap_lc_model(runnable, loader_fn)
        conf[_MODEL_TYPE_KEY] = lc_model.__class__.__name__
        conf.update(_save_base_lcs(lc_model, path, loader_fn, persist_dir))
    else:
        conf = {
            _MODEL_TYPE_KEY: runnable.__class__.__name__,
            _MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME,
            _MODEL_LOAD_KEY: _CONFIG_LOAD_KEY,
        }
        path = path / _MODEL_DATA_YAML_FILE_NAME
        # Save some simple runnables that langchain natively supports.
        if hasattr(runnable, "save"):
            runnable.save(path)
        # TODO: check if `dict` is enough to load it back
        elif hasattr(runnable, "dict"):
            runnable_dict = runnable.dict()
            with open(path, "w") as f:
                yaml.dump(runnable_dict, f, default_flow_style=False)
        else:
            return
    return conf
def _save_runnable_with_steps(model, file_path: Union[Path, str], loader_fn=None, persist_dir=None):
    """Save the model with steps. Currently it supports saving RunnableSequence and
    RunnableParallel.
    If saving a RunnableSequence, steps is a list of Runnable objects. We save each step to the
    subfolder named by the step index.
    e.g.  - model
            - steps
              - 0
                - model.yaml
              - 1
                - model.pkl
            - steps.yaml
    If saving a RunnableParallel, steps is a dictionary of key-Runnable pairs. We save each step to
    the subfolder named by the key.
    e.g.  - model
            - steps
              - context
                - model.yaml
              - question
                - model.pkl
            - steps.yaml
    We save steps.yaml file to the model folder. It contains each step's model's configuration.
    Args:
        model: Runnable to be saved.
        file_path: Path to file to save the model to.
    """
    # Convert file to Path object.
    save_path = Path(file_path)
    save_path.mkdir(parents=True, exist_ok=True)
    # Save steps into a folder
    steps_path = save_path / _STEPS_FOLDER_NAME
    steps_path.mkdir()
    steps = model.steps
    if isinstance(steps, list):
        generator = enumerate(steps)
    elif isinstance(steps, dict):
        generator = steps.items()
    else:
        raise MlflowException(
            f"Runnable {model} steps attribute must be either a list or a dictionary. "
            f"Got {type(steps).__name__}."
        )
    unsaved_runnables = {}
    steps_conf = {}
    for key, runnable in generator:
        step = str(key)
        # Save each step into a subfolder named by step
        save_runnable_path = steps_path / step
        save_runnable_path.mkdir()
        if result := _save_internal_runnables(runnable, save_runnable_path, loader_fn, persist_dir):
            steps_conf[step] = result
        else:
            unsaved_runnables[step] = str(runnable)
    if unsaved_runnables:
        raise MlflowException(
            f"Failed to save runnable sequence: {unsaved_runnables}. "
            "Runnable must have either `save` or `dict` method."
        )
    # save steps configs
    with save_path.joinpath(_RUNNABLE_STEPS_FILE_NAME).open("w") as f:
        yaml.dump(steps_conf, f, default_flow_style=False)
def _save_runnable_branch(model, file_path, loader_fn, persist_dir):
    """
    Save runnable branch in to path.
    """
    save_path = Path(file_path)
    save_path.mkdir(parents=True, exist_ok=True)
    # save branches into a folder
    branches_path = save_path / _BRANCHES_FOLDER_NAME
    branches_path.mkdir()
    unsaved_runnables = {}
    branches_conf = {}
    for index, branch_tuple in enumerate(model.branches):
        # Save each branch into a subfolder named by index
        # and save condition and runnable into subfolder
        for i, runnable in enumerate(branch_tuple):
            save_runnable_path = branches_path / str(index) / str(i)
            save_runnable_path.mkdir(parents=True)
            branches_conf[f"{index}-{i}"] = {}
            if result := _save_internal_runnables(
                runnable, save_runnable_path, loader_fn, persist_dir
            ):
                branches_conf[f"{index}-{i}"] = result
            else:
                unsaved_runnables[f"{index}-{i}"] = str(runnable)
    # save default branch
    default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
    default_branch_path.mkdir()
    if result := _save_internal_runnables(
        model.default, default_branch_path, loader_fn, persist_dir
    ):
        branches_conf[_DEFAULT_BRANCH_NAME] = result
    else:
        unsaved_runnables[_DEFAULT_BRANCH_NAME] = str(model.default)
    if unsaved_runnables:
        raise MlflowException(
            f"Failed to save runnable branch: {unsaved_runnables}. "
            "Runnable must have either `save` or `dict` method."
        )
    # save branches configs
    with save_path.joinpath(_RUNNABLE_BRANCHES_FILE_NAME).open("w") as f:
        yaml.dump(branches_conf, f, default_flow_style=False)
def _save_runnable_assign(model, file_path, loader_fn=None, persist_dir=None):
    from langchain.schema.runnable import RunnableParallel
    save_path = Path(file_path)
    save_path.mkdir(parents=True, exist_ok=True)
    # save mapper into a folder
    mapper_path = save_path / _MAPPER_FOLDER_NAME
    mapper_path.mkdir()
    if not isinstance(model.mapper, RunnableParallel):
        raise MlflowException(
            f"Failed to save model {model} with type {model.__class__.__name__}. "
            "RunnableAssign's mapper must be a RunnableParallel."
        )
    _save_runnable_with_steps(model.mapper, mapper_path, loader_fn, persist_dir)
def _save_picklable_runnable(model, path):
    if not path.endswith(".pkl"):
        raise ValueError(f"File path must end with .pkl, got {path}.")
    with open(path, "wb") as f:
        cloudpickle.dump(model, f)
def _save_runnables(model, path, loader_fn=None, persist_dir=None):
    model_data_kwargs = {_MODEL_LOAD_KEY: _RUNNABLE_LOAD_KEY}
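    # Composite runnables (with steps, branch, assign) are saved into a data folder; picklable runnables into a single .pkl file.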
    if isinstance(model, lc_runnable_with_steps_types()):
        model_data_path = _MODEL_DATA_FOLDER_NAME
        _save_runnable_with_steps(
            model, os.path.join(path, model_data_path), loader_fn, persist_dir
        )
    elif isinstance(model, picklable_runnable_types()):
        model_data_path = _MODEL_DATA_PKL_FILE_NAME
        _save_picklable_runnable(model, os.path.join(path, model_data_path))
    elif isinstance(model, lc_runnable_branch_types()):
        model_data_path = _MODEL_DATA_FOLDER_NAME
        _save_runnable_branch(model, os.path.join(path, model_data_path), loader_fn, persist_dir)
    elif isinstance(model, lc_runnable_assign_types()):
        model_data_path = _MODEL_DATA_FOLDER_NAME
        _save_runnable_assign(model, os.path.join(path, model_data_path), loader_fn, persist_dir)
    else:
        raise MlflowException.invalid_parameter_value(
            _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__)
        )
    model_data_kwargs.update({_MODEL_DATA_KEY: model_data_path})
    return model_data_kwargs
def _load_runnables(path, conf):
    model_type = conf.get(_MODEL_TYPE_KEY)
    model_data = conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME)
    if model_type in (x.__name__ for x in lc_runnable_with_steps_types()):
        return _load_runnable_with_steps(os.path.join(path, model_data), model_type)
    if (
        model_type in (x.__name__ for x in picklable_runnable_types())
        or model_data == _MODEL_DATA_PKL_FILE_NAME
    ):
        return _load_from_pickle(os.path.join(path, model_data))
    if model_type in (x.__name__ for x in lc_runnable_branch_types()):
        return _load_runnable_branch(os.path.join(path, model_data))
    if model_type in (x.__name__ for x in lc_runnable_assign_types()):
        return _load_runnable_assign(os.path.join(path, model_data))
    raise MlflowException.invalid_parameter_value(
        _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=model_type)
    )
 | 
	[
  "langchain.chains.loading.load_chain",
  "langchain.prompts.loading.load_prompt",
  "langchain.schema.runnable.RunnableSequence",
  "langchain.schema.runnable.RunnableParallel",
  "langchain.schema.runnable.passthrough.RunnableAssign",
  "langchain.schema.runnable.RunnableBranch",
  "langchain.llms.get_type_to_cls_dict",
  "langchain.llms.loading.load_llm"
] | 
	[((2386, 2443), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported type {_type} for loading."""'], {}), "(f'Unsupported type {_type} for loading.')\n", (2401, 2443), False, 'from mlflow.exceptions import MlflowException\n'), ((2853, 2915), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported model load key {model_load_fn}"""'], {}), "(f'Unsupported model load key {model_load_fn}')\n", (2868, 2915), False, 'from mlflow.exceptions import MlflowException\n'), ((3268, 3283), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3272, 3283), False, 'from pathlib import Path\n'), ((3745, 3777), 'mlflow.langchain.utils._load_from_yaml', '_load_from_yaml', (['steps_conf_file'], {}), '(steps_conf_file)\n', (3760, 3777), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((5047, 5102), 'langchain.schema.runnable.RunnableSequence', 'RunnableSequence', ([], {'first': 'first', 'middle': 'middle', 'last': 'last'}), '(first=first, middle=middle, last=last)\n', (5063, 5102), False, 'from langchain.schema.runnable import RunnableSequence\n'), ((5367, 5382), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (5371, 5382), False, 'from pathlib import Path\n'), ((5859, 5894), 'mlflow.langchain.utils._load_from_yaml', '_load_from_yaml', (['branches_conf_file'], {}), '(branches_conf_file)\n', (5874, 5894), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((6218, 6243), 'os.listdir', 'os.listdir', (['branches_path'], {}), '(branches_path)\n', (6228, 6243), False, 'import os\n'), ((7003, 7028), 'langchain.schema.runnable.RunnableBranch', 'RunnableBranch', (['*branches'], {}), '(*branches)\n', (7017, 7028), False, 'from langchain.schema.runnable import RunnableBranch\n'), ((7305, 7320), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (7309, 7320), False, 'from pathlib import Path\n'), ((7881, 7903), 'langchain.schema.runnable.passthrough.RunnableAssign', 'RunnableAssign', (['mapper'], {}), '(mapper)\n', (7895, 7903), False, 'from langchain.schema.runnable.passthrough import RunnableAssign\n'), ((10183, 10198), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (10187, 10198), False, 'from pathlib import Path\n'), ((11645, 11660), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11649, 11660), False, 'from pathlib import Path\n'), ((13438, 13453), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (13442, 13453), False, 'from pathlib import Path\n'), ((1579, 1607), 'mlflow.langchain.utils._load_from_yaml', 
'_load_from_yaml', (['config_path'], {}), '(config_path)\n', (1594, 1607), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1976, 1999), 'langchain.chains.loading.load_chain', 'load_chain', (['config_path'], {}), '(config_path)\n', (1986, 1999), False, 'from langchain.chains.loading import load_chain\n'), ((2707, 2741), 'mlflow.langchain.utils._load_base_lcs', '_load_base_lcs', (['path', 'model_config'], {}), '(path, model_config)\n', (2721, 2741), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((3355, 3474), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n    f'File {load_path} must exist and must be a directory in order to load runnable with steps.'\n    )\n", (3370, 3474), False, 'from mlflow.exceptions import MlflowException\n'), ((3614, 3710), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {steps_conf_file} must exist in order to load runnable with steps."""'], {}), "(\n    f'File {steps_conf_file} must exist in order to load runnable with steps.')\n", (3629, 3710), False, 'from mlflow.exceptions import MlflowException\n'), ((3899, 4021), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder {steps_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n    f'Folder {steps_path} must exist and must be a directory in order to load runnable with steps.'\n    )\n", (3914, 4021), False, 'from mlflow.exceptions import MlflowException\n'), ((4624, 4647), 'langchain.schema.runnable.RunnableParallel', 'RunnableParallel', (['steps'], {}), '(steps)\n', (4640, 4647), False, 'from langchain.schema.runnable import RunnableParallel\n'), ((5454, 5573), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n    f'File {load_path} must exist and must be a directory in order to load runnable with steps.'\n    )\n", (5469, 5573), False, 'from mlflow.exceptions import MlflowException\n'), ((5722, 5826), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {branches_conf_file} must exist in order to load runnable with steps."""'], {}), "(\n    f'File {branches_conf_file} must exist in order to load runnable with steps.'\n    )\n", (5737, 5826), False, 'from mlflow.exceptions import MlflowException\n'), ((6028, 6153), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder 
{branches_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n    f'Folder {branches_path} must exist and must be a directory in order to load runnable with steps.'\n    )\n", (6043, 6153), False, 'from mlflow.exceptions import MlflowException\n'), ((7392, 7500), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable."""'], {}), "(\n    f'File {load_path} must exist and must be a directory in order to load runnable.'\n    )\n", (7407, 7500), False, 'from mlflow.exceptions import MlflowException\n'), ((7639, 7770), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder {mapper_file} must exist and must be a directory in order to load runnable assign with mapper."""'], {}), "(\n    f'Folder {mapper_file} must exist and must be a directory in order to load runnable assign with mapper.'\n    )\n", (7654, 7770), False, 'from mlflow.exceptions import MlflowException\n'), ((8018, 8038), 'mlflow.langchain.utils.lc_runnables_types', 'lc_runnables_types', ([], {}), '()\n', (8036, 8038), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((11186, 11321), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save runnable sequence: {unsaved_runnables}. Runnable must have either `save` or `dict` method."""'], {}), "(\n    f'Failed to save runnable sequence: {unsaved_runnables}. Runnable must have either `save` or `dict` method.'\n    )\n", (11201, 11321), False, 'from mlflow.exceptions import MlflowException\n'), ((11454, 11504), 'yaml.dump', 'yaml.dump', (['steps_conf', 'f'], {'default_flow_style': '(False)'}), '(steps_conf, f, default_flow_style=False)\n', (11463, 11504), False, 'import yaml\n'), ((12955, 13088), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save runnable branch: {unsaved_runnables}. Runnable must have either `save` or `dict` method."""'], {}), "(\n    f'Failed to save runnable branch: {unsaved_runnables}. Runnable must have either `save` or `dict` method.'\n    )\n", (12970, 13088), False, 'from mlflow.exceptions import MlflowException\n'), ((13227, 13280), 'yaml.dump', 'yaml.dump', (['branches_conf', 'f'], {'default_flow_style': '(False)'}), '(branches_conf, f, default_flow_style=False)\n', (13236, 13280), False, 'import yaml\n'), ((13679, 13827), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save model {model} with type {model.__class__.__name__}. RunnableAssign\'s mapper must be a RunnableParallel."""'], {}), '(\n    f"Failed to save model {model} with type {model.__class__.__name__}. 
RunnableAssign\'s mapper must be a RunnableParallel."\n    )\n', (13694, 13827), False, 'from mlflow.exceptions import MlflowException\n'), ((14126, 14152), 'cloudpickle.dump', 'cloudpickle.dump', (['model', 'f'], {}), '(model, f)\n', (14142, 14152), False, 'import cloudpickle\n'), ((14310, 14340), 'mlflow.langchain.utils.lc_runnable_with_steps_types', 'lc_runnable_with_steps_types', ([], {}), '()\n', (14338, 14340), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((16218, 16283), 'mlflow.langchain.utils._UNSUPPORTED_MODEL_ERROR_MESSAGE.format', '_UNSUPPORTED_MODEL_ERROR_MESSAGE.format', ([], {'instance_type': 'model_type'}), '(instance_type=model_type)\n', (16257, 16283), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1665, 1693), 'mlflow.langchain.utils._load_from_json', '_load_from_json', (['config_path'], {}), '(config_path)\n', (1680, 1693), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1718, 1810), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Cannot load runnable without a config file. Got path {config_path}."""'], {}), "(\n    f'Cannot load runnable without a config file. 
Got path {config_path}.')\n", (1733, 1810), False, 'from mlflow.exceptions import MlflowException\n'), ((2107, 2131), 'langchain.prompts.loading.load_prompt', 'load_prompt', (['config_path'], {}), '(config_path)\n', (2118, 2131), False, 'from langchain.prompts.loading import load_prompt\n'), ((4119, 4141), 'os.listdir', 'os.listdir', (['steps_path'], {}), '(steps_path)\n', (4129, 4141), False, 'import os\n'), ((4297, 4327), 'os.path.join', 'os.path.join', (['steps_path', 'step'], {}), '(steps_path, step)\n', (4309, 4327), False, 'import os\n'), ((8208, 8223), 'mlflow.langchain.utils.base_lc_types', 'base_lc_types', ([], {}), '()\n', (8221, 8223), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((8245, 8293), 'mlflow.langchain.utils._validate_and_wrap_lc_model', '_validate_and_wrap_lc_model', (['runnable', 'loader_fn'], {}), '(runnable, loader_fn)\n', (8272, 8293), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14447, 14482), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14459, 14482), False, 'import os\n'), ((14544, 14570), 'mlflow.langchain.utils.picklable_runnable_types', 'picklable_runnable_types', ([], {}), '()\n', (14568, 14570), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15632, 15662), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15644, 15662), False, 'import os\n'), ((15847, 15877), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15859, 15877), False, 'import os\n'), ((15987, 16017), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15999, 16017), False, 'import os\n'), ((16127, 16157), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (16139, 16157), False, 'import os\n'), ((2150, 2177), 'langchain.llms.get_type_to_cls_dict', 'llms_get_type_to_cls_dict', ([], {}), '()\n', (2175, 2177), True, 'from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict\n'), ((2247, 2268), 
'langchain.llms.loading.load_llm', 'load_llm', (['config_path'], {}), '(config_path)\n', (2255, 2268), False, 'from langchain.llms.loading import load_llm\n'), ((8374, 8428), 'mlflow.langchain.utils._save_base_lcs', '_save_base_lcs', (['lc_model', 'path', 'loader_fn', 'persist_dir'], {}), '(lc_model, path, loader_fn, persist_dir)\n', (8388, 8428), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14665, 14700), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14677, 14700), False, 'import os\n'), ((14729, 14755), 'mlflow.langchain.utils.lc_runnable_branch_types', 'lc_runnable_branch_types', ([], {}), '()\n', (14753, 14755), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15558, 15588), 'mlflow.langchain.utils.lc_runnable_with_steps_types', 'lc_runnable_with_steps_types', ([], {}), '()\n', (15586, 15588), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15921, 15947), 'mlflow.langchain.utils.lc_runnable_branch_types', 'lc_runnable_branch_types', ([], {}), '()\n', (15945, 15947), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((16061, 16087), 'mlflow.langchain.utils.lc_runnable_assign_types', 'lc_runnable_assign_types', ([], {}), '()\n', (16085, 16087), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, 
_load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((2287, 2315), 'mlflow.langchain.utils.custom_type_to_loader_dict', 'custom_type_to_loader_dict', ([], {}), '()\n', (2313, 2315), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14845, 14880), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14857, 14880), False, 'import os\n'), ((14933, 14959), 'mlflow.langchain.utils.lc_runnable_assign_types', 'lc_runnable_assign_types', ([], {}), '()\n', (14957, 14959), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15728, 15754), 'mlflow.langchain.utils.picklable_runnable_types', 'picklable_runnable_types', ([], {}), '()\n', (15752, 15754), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((9016, 9069), 'yaml.dump', 'yaml.dump', (['runnable_dict', 'f'], {'default_flow_style': '(False)'}), '(runnable_dict, f, default_flow_style=False)\n', (9025, 9069), False, 'import yaml\n'), ((15049, 15084), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (15061, 15084), False, 'import os\n'), ((2332, 2360), 'mlflow.langchain.utils.custom_type_to_loader_dict', 'custom_type_to_loader_dict', ([], {}), '()\n', (2358, 2360), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n')] | 
| 
	import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
    TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter
from llama_index import SimpleDirectoryReader
from llama_index.node_parser.interface import TextSplitter
from llama_index.schema import Document
from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter
from streamlit.runtime.uploaded_file_manager import UploadedFile
DEFAULT_TEXT = "The quick brown fox jumps over the lazy dog."
text = st.sidebar.text_area("Enter text", value=DEFAULT_TEXT)
uploaded_files = st.sidebar.file_uploader("Upload file", accept_multiple_files=True)
type = st.sidebar.radio("Document Type", options=["Text", "Code"])
n_cols = st.sidebar.number_input("Columns", value=2, min_value=1, max_value=3)
assert isinstance(n_cols, int)
@st.cache_resource(ttl=3600)
def load_document(uploaded_files: List[UploadedFile]) -> List[Document]:
    # Read documents
    temp_dir = tempfile.TemporaryDirectory()
    for file in uploaded_files:
        temp_filepath = os.path.join(temp_dir.name, file.name)
        with open(temp_filepath, "wb") as f:
            f.write(file.getvalue())
    reader = SimpleDirectoryReader(input_dir=temp_dir.name)
    return reader.load_data()
if uploaded_files:
    if text != DEFAULT_TEXT:
        st.warning("Text will be ignored when uploading files")
    docs = load_document(uploaded_files)
    text = "\n".join([doc.text for doc in docs])
chunk_size = st.slider(
    "Chunk Size",
    value=512,
    min_value=1,
    max_value=4096,
)
chunk_overlap = st.slider(
    "Chunk Overlap",
    value=0,
    min_value=0,
    max_value=4096,
)
cols = st.columns(n_cols)
for ind, col in enumerate(cols):
    if type == "Text":
        text_splitter_cls = col.selectbox(
            "Text Splitter",
            options=[
                "TokenTextSplitter",
                "SentenceSplitter",
                "LC:RecursiveCharacterTextSplitter",
                "LC:CharacterTextSplitter",
                "LC:TokenTextSplitter",
            ],
            index=ind,
            key=f"splitter_cls_{ind}",
        )
        text_splitter: Union[TextSplitter, LCSplitter]
        if text_splitter_cls == "TokenTextSplitter":
            text_splitter = TokenTextSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "SentenceSplitter":
            text_splitter = SentenceSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:RecursiveCharacterTextSplitter":
            text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:CharacterTextSplitter":
            text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:TokenTextSplitter":
            text_splitter = LCTokenTextSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        else:
            raise ValueError("Unknown text splitter")
    elif type == "Code":
        text_splitter_cls = col.selectbox("Text Splitter", options=["CodeSplitter"])
        if text_splitter_cls == "CodeSplitter":
            language = col.text_input("Language", value="python")
            max_chars = col.slider("Max Chars", value=1500)
            text_splitter = CodeSplitter(language=language, max_chars=max_chars)
        else:
            raise ValueError("Unknown text splitter")
    chunks = text_splitter.split_text(text)
    tokenizer = tiktoken.get_encoding("gpt2").encode
    for chunk_ind, chunk in enumerate(chunks):
        n_tokens = len(tokenizer(chunk))
        n_chars = len(chunk)
        col.text_area(
            f"Chunk {chunk_ind} - {n_tokens} tokens - {n_chars} chars",
            chunk,
            key=f"text_area_{ind}_{chunk_ind}",
            height=500,
        )
 | 
	[
  "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
  "langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
  "langchain.text_splitter.TokenTextSplitter"
] | 
	[((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_multiple_files': '(True)'}), "('Upload file', accept_multiple_files=True)\n", (814, 857), True, 'import streamlit as st\n'), ((865, 924), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Document Type"""'], {'options': "['Text', 'Code']"}), "('Document Type', options=['Text', 'Code'])\n", (881, 924), True, 'import streamlit as st\n'), ((934, 1003), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Columns"""'], {'value': '(2)', 'min_value': '(1)', 'max_value': '(3)'}), "('Columns', value=2, min_value=1, max_value=3)\n", (957, 1003), True, 'import streamlit as st\n'), ((1038, 1065), 'streamlit.cache_resource', 'st.cache_resource', ([], {'ttl': '(3600)'}), '(ttl=3600)\n', (1055, 1065), True, 'import streamlit as st\n'), ((1692, 1755), 'streamlit.slider', 'st.slider', (['"""Chunk Size"""'], {'value': '(512)', 'min_value': '(1)', 'max_value': '(4096)'}), "('Chunk Size', value=512, min_value=1, max_value=4096)\n", (1701, 1755), True, 'import streamlit as st\n'), ((1791, 1855), 'streamlit.slider', 'st.slider', (['"""Chunk Overlap"""'], {'value': '(0)', 'min_value': '(0)', 'max_value': '(4096)'}), "('Chunk Overlap', value=0, min_value=0, max_value=4096)\n", (1800, 1855), True, 'import streamlit as st\n'), ((1883, 1901), 'streamlit.columns', 'st.columns', (['n_cols'], {}), '(n_cols)\n', (1893, 1901), True, 'import streamlit as st\n'), ((1175, 1204), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1202, 1204), False, 'import tempfile\n'), ((1396, 1442), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'temp_dir.name'}), '(input_dir=temp_dir.name)\n', (1417, 1442), False, 'from llama_index import SimpleDirectoryReader\n'), ((1261, 1299), 'os.path.join', 'os.path.join', (['temp_dir.name', 'file.name'], {}), '(temp_dir.name, file.name)\n', (1273, 1299), False, 'import os\n'), ((1531, 1586), 'streamlit.warning', 'st.warning', (['"""Text will be ignored when uploading files"""'], {}), "('Text will be ignored when uploading files')\n", (1541, 1586), True, 'import streamlit as st\n'), ((3968, 3997), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (3989, 3997), False, 'import tiktoken\n'), ((2486, 2555), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2503, 2555), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2668, 2736), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2684, 2736), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((3786, 3838), 'llama_index.text_splitter.CodeSplitter', 'CodeSplitter', ([], {'language': 'language', 'max_chars': 'max_chars'}), '(language=language, max_chars=max_chars)\n', (3798, 3838), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2866, 2974), 
'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n    chunk_overlap=chunk_overlap)\n', (2918, 2974), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3091, 3190), 'langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder', 'CharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n    chunk_overlap=chunk_overlap)\n', (3134, 3190), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3303, 3374), 'langchain.text_splitter.TokenTextSplitter', 'LCTokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3322, 3374), True, 'from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter\n')] | 
| 
	import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
    # Strip whitespace, then extract the content of a fenced ```json block if present
    json_string = json_string.strip()
    start_index = json_string.find("```json")
    end_index = json_string.find("```", start_index + len("```json"))
    if start_index != -1 and end_index != -1:
        extracted_content = json_string[start_index + len("```json"):end_index].strip()
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(extracted_content)
    elif start_index != -1 and end_index == -1 and json_string.endswith("``"):
        end_index = json_string.find("``", start_index + len("```json"))
        extracted_content = json_string[start_index + len("```json"):end_index].strip()
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(extracted_content)
    elif json_string.startswith("{"):
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(json_string)
    else:
        raise Exception("Could not find JSON block in the output.")
    return parsed
def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
    try:
        json_obj = parse_json_markdown(text)
    except json.JSONDecodeError as e:
        raise OutputParserException(f"Got invalid JSON object. Error: {e}")
    for key in expected_keys:
        if key not in json_obj:
            raise OutputParserException(
                f"Got invalid return object. Expected key `{key}` "
                f"to be present, but got {json_obj}"
            )
    return json_obj
 | 
	[
  "langchain.schema.OutputParserException"
] | 
	[((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid JSON object. Error: {e}"""'], {}), "(f'Got invalid JSON object. Error: {e}')\n", (1343, 1383), False, 'from langchain.schema import OutputParserException\n'), ((1464, 1581), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"""'], {}), "(\n    f'Got invalid return object. Expected key `{key}` to be present, but got {json_obj}'\n    )\n", (1485, 1581), False, 'from langchain.schema import OutputParserException\n'), ((1013, 1036), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1023, 1036), False, 'import json\n')] | 
| 
	# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size
    def split_text1(self, text: str) -> List[str]:
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub('\s', ' ', text)
            text = text.replace("\n\n", "")
        sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
        sent_list = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list
    def split_text(self, text: str) -> List[str]:   ## This logic still needs further optimization
        if self.pdf:
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub('\s', " ", text)
            text = re.sub("\n\n", "", text)
        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence delimiters
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        # If a terminator appears before a closing quote, the quote marks the end of the sentence,
        # so the \n separator is placed after the quote; note the rules above carefully preserve the quotes.
        text = text.rstrip()  # strip any extra trailing \n at the end of the segment
        # Many rule sets also split on the semicolon ;, but it is ignored here, as are dashes and
        # English double quotes; make the simple adjustments yourself if you need them.
        ls = [i for i in text.split("\n") if i]
        for ele in ls:
            if len(ele) > self.sentence_size:
                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
                ele1_ls = ele1.split("\n")
                for ele_ele1 in ele1_ls:
                    if len(ele_ele1) > self.sentence_size:
                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
                        ele2_ls = ele_ele2.split("\n")
                        for ele_ele2 in ele2_ls:
                            if len(ele_ele2) > self.sentence_size:
                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
                                ele2_id = ele2_ls.index(ele_ele2)
                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
                                                                                                       ele2_id + 1:]
                        ele_id = ele1_ls.index(ele_ele1)
                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
                id = ls.index(ele)
                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
        return ls
def load_file(filepath, sentence_size):
    loader = UnstructuredFileLoader(filepath, mode="elements")
    textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
    docs = loader.load_and_split(text_splitter=textsplitter)
    # write_check_file(filepath, docs)
    return docs
 | 
	[
  "langchain.document_loaders.UnstructuredFileLoader"
] | 
	[((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))"""'], {}), '(\'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))\')\n', (667, 714), False, 'import re\n'), ((1218, 1269), 're.sub', 're.sub', (['"""([;;.!?。!?\\\\?])([^”’])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), "('([;;.!?。!?\\\\?])([^”’])', '\\\\1\\\\n\\\\2', text)\n", (1224, 1269), False, 'import re\n'), ((1293, 1340), 're.sub', 're.sub', (['"""(\\\\.{6})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\.{6})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1299, 1340), False, 'import re\n'), ((1363, 1410), 're.sub', 're.sub', (['"""(\\\\…{2})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\…{2})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1369, 1410), False, 'import re\n'), ((1433, 1504), 're.sub', 're.sub', (['"""([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])\', \'\\\\1\\\\n\\\\2\', text)\n', (1439, 1504), False, 'import re\n'), ((513, 542), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\n"""', 'text'], {}), "('\\\\n{3,}', '\\n', text)\n", (519, 542), False, 'import re\n'), ((562, 586), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (568, 586), False, 'import re\n'), ((1084, 1114), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\\\\n"""', 'text'], {}), "('\\\\n{3,}', '\\\\n', text)\n", (1090, 1114), False, 'import re\n'), ((1134, 1158), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (1140, 1158), False, 'import re\n'), ((1177, 1201), 're.sub', 're.sub', (['"""\n\n"""', '""""""', 'text'], {}), "('\\n\\n', '', text)\n", (1183, 1201), False, 'import re\n'), ((1816, 1871), 're.sub', 're.sub', (['"""([,,.]["’”」』]{0,2})([^,,.])"""', '"""\\\\1\\\\n\\\\2"""', 'ele'], {}), '(\'([,,.]["’”」』]{0,2})([^,,.])\', \'\\\\1\\\\n\\\\2\', ele)\n', (1822, 1871), False, 'import re\n'), ((2049, 2119), 're.sub', 're.sub', (['"""([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele1'], {}), '(\'([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])\', \'\\\\1\\\\n\\\\2\', ele_ele1)\n', (2055, 2119), False, 'import re\n'), ((2331, 2385), 're.sub', 're.sub', (['"""( ["’”」』]{0,2})([^ ])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele2'], {}), '(\'( ["’”」』]{0,2})([^ ])\', \'\\\\1\\\\n\\\\2\', ele_ele2)\n', (2337, 2385), False, 'import re\n')] | 
| 
	import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
class Agent(BaseModel):
    """Represents an agent in a system.
    Each agent has a role, a goal, a backstory, and an optional language model (llm).
    The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
    Attributes:
            agent_executor: An instance of the CrewAgentExecutor class.
            role: The role of the agent.
            goal: The objective of the agent.
            backstory: The backstory of the agent.
            config: Dict representation of agent configuration.
            llm: The language model that will run the agent.
            function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
            max_iter: Maximum number of iterations for an agent to execute a task.
            memory: Whether the agent should have memory or not.
            max_rpm: Maximum number of requests per minute for the agent execution to be respected.
            verbose: Whether the agent execution should be in verbose mode.
            allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
            tools: Tools at the agent's disposal.
            step_callback: Callback to be executed after each step of the agent execution.
            callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """
    __hash__ = object.__hash__  # type: ignore
    _logger: Logger = PrivateAttr()
    _rpm_controller: RPMController = PrivateAttr(default=None)
    _request_within_rpm_limit: Any = PrivateAttr(default=None)
    _token_process: TokenProcess = TokenProcess()
    formatting_errors: int = 0
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    config: Optional[Dict[str, Any]] = Field(
        description="Configuration for the agent",
        default=None,
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    memory: bool = Field(
        default=False, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
        default=15, description="Maximum iterations for an agent to execute a task"
    )
    agent_executor: InstanceOf[CrewAgentExecutor] = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    tools_handler: InstanceOf[ToolsHandler] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=CacheHandler(), description="An instance of the CacheHandler class."
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4")
        ),
        description="Language model that will run the agent.",
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None, description="Callback to be executed"
    )
    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)
    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )
    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Agent":
        """Set attributes based on the agent configuration."""
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self
    @model_validator(mode="after")
    def set_private_attrs(self):
        """Set private attributes."""
        self._logger = Logger(self.verbose)
        if self.max_rpm and not self._rpm_controller:
            self._rpm_controller = RPMController(
                max_rpm=self.max_rpm, logger=self._logger
            )
        return self
    @model_validator(mode="after")
    def set_agent_executor(self) -> "Agent":
        """set agent executor is set."""
        if hasattr(self.llm, "model_name"):
            self.llm.callbacks = [
                TokenCalcHandler(self.llm.model_name, self._token_process)
            ]
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self
    def execute_task(
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.
        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.
        Returns:
            Output of the agent
        """
        self.tools_handler.last_used_tool = {}
        task_prompt = task.prompt()
        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context
            )
        tools = self._parse_tools(tools or self.tools)
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = tools
        self.agent_executor.task = task
        self.agent_executor.tools_description = render_text_description(tools)
        self.agent_executor.tools_names = self.__tools_names(tools)
        result = self.agent_executor.invoke(
            {
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
            }
        )["output"]
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        return result
    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        """Set the cache handler for the agent.
        Args:
            cache_handler: An instance of the CacheHandler class.
        """
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self.create_agent_executor()
    def set_rpm_controller(self, rpm_controller: RPMController) -> None:
        """Set the rpm controller for the agent.
        Args:
            rpm_controller: An instance of the RPMController class.
        """
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self.create_agent_executor()
    def create_agent_executor(self, tools=None) -> None:
        """Create an agent executor for the agent.
        Returns:
            An instance of the CrewAgentExecutor class.
        """
        tools = tools or self.tools
        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: self.format_log_to_str(
                x["intermediate_steps"]
            ),
        }
        executor_args = {
            "llm": self.llm,
            "i18n": self.i18n,
            "tools": self._parse_tools(tools),
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "step_callback": self.step_callback,
            "tools_handler": self.tools_handler,
            "function_calling_llm": self.function_calling_llm,
            "callbacks": self.callbacks,
        }
        if self._rpm_controller:
            executor_args[
                "request_within_rpm_limit"
            ] = self._rpm_controller.check_or_wait
        if self.memory:
            summary_memory = ConversationSummaryMemory(
                llm=self.llm, input_key="input", memory_key="chat_history"
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory()
        else:
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )
        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
        inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args
        )
    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the agent description and backstory."""
        if inputs:
            self.role = self.role.format(**inputs)
            self.goal = self.goal.format(**inputs)
            self.backstory = self.backstory.format(**inputs)
    def increment_formatting_errors(self) -> None:
        """Count the formatting errors of the agent."""
        self.formatting_errors += 1
    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts
    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
        """Parse tools to be used for the task."""
        # Tentatively try to import BaseTool from crewai_tools; if the package is unavailable, pass the tools through unchanged.
        tools_list = []
        try:
            from crewai_tools import BaseTool as CrewAITool
            for tool in tools:
                if isinstance(tool, CrewAITool):
                    tools_list.append(tool.to_langchain())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            for tool in tools:
                tools_list.append(tool)
        return tools_list
    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])
    def __repr__(self):
        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
 | 
	[
  "langchain.memory.ConversationSummaryMemory",
  "langchain.agents.agent.RunnableAgent",
  "langchain.tools.render.render_text_description"
] | 
	[((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2454, 2468), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2506, 2531), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2517, 2531), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2567, 2581), 'crewai.utilities.token_counter_callback.TokenProcess', 'TokenProcess', ([], {}), '()\n', (2579, 2581), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((2633, 2673), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (2643, 2673), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2690, 2807), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'frozen': '(True)', 'description': '"""Unique identifier for the object, not set by user."""'}), "(default_factory=uuid.uuid4, frozen=True, description=\n    'Unique identifier for the object, not set by user.')\n", (2695, 2807), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2850, 2888), 'pydantic.Field', 'Field', ([], {'description': '"""Role of the agent"""'}), "(description='Role of the agent')\n", (2855, 2888), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2905, 2948), 'pydantic.Field', 'Field', ([], {'description': '"""Objective of the agent"""'}), "(description='Objective of the agent')\n", (2910, 2948), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2970, 3013), 'pydantic.Field', 'Field', ([], {'description': '"""Backstory of the agent"""'}), "(description='Backstory of the agent')\n", (2975, 3013), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3053, 3115), 'pydantic.Field', 'Field', ([], {'description': '"""Configuration for the agent"""', 'default': 'None'}), "(description='Configuration for the agent', default=None)\n", (3058, 3115), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3168, 3291), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Maximum number of requests per minute for the agent execution to be respected."""'}), "(default=None, description=\n    'Maximum number of requests per minute for the agent execution to be respected.'\n    )\n", (3173, 3291), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3324, 3403), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the agent should have memory or not"""'}), "(default=False, description='Whether the agent should have memory or not')\n", (3329, 3403), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, 
InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3438, 3510), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Verbose mode for the Agent Execution"""'}), "(default=False, description='Verbose mode for the Agent Execution')\n", (3443, 3510), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3554, 3624), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Allow delegation of tasks to agents"""'}), "(default=True, description='Allow delegation of tasks to agents')\n", (3559, 3624), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3672, 3739), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Tools at agents disposal"""'}), "(default_factory=list, description='Tools at agents disposal')\n", (3677, 3739), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3784, 3871), 'pydantic.Field', 'Field', ([], {'default': '(15)', 'description': '"""Maximum iterations for an agent to execute a task"""'}), "(default=15, description=\n    'Maximum iterations for an agent to execute a task')\n", (3789, 3871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3933, 4011), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the CrewAgentExecutor class."""'}), "(default=None, description='An instance of the CrewAgentExecutor class.')\n", (3938, 4011), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4072, 4145), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the ToolsHandler class."""'}), "(default=None, description='An instance of the ToolsHandler class.')\n", (4077, 4145), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4339, 4442), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed after each step of the agent execution."""'}), "(default=None, description=\n    'Callback to be executed after each step of the agent execution.')\n", (4344, 4442), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4797, 4871), 'pydantic.Field', 'Field', ([], {'description': '"""Language model that will run the agent."""', 'default': 'None'}), "(description='Language model that will run the agent.', default=None)\n", (4802, 4871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4951, 5009), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed"""'}), "(default=None, description='Callback to be executed')\n", (4956, 5009), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5159, 5195), 'pydantic.field_validator', 'field_validator', (['"""id"""'], {'mode': '"""before"""'}), "('id', mode='before')\n", (5174, 5195), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5430, 5459), 
'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5445, 5459), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5723, 5752), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5738, 5752), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((6070, 6099), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (6085, 6099), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5847, 5867), 'crewai.utilities.Logger', 'Logger', (['self.verbose'], {}), '(self.verbose)\n', (5853, 5867), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((7345, 7375), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (7368, 7375), False, 'from langchain.tools.render import render_text_description\n'), ((8088, 8126), 'crewai.agents.ToolsHandler', 'ToolsHandler', ([], {'cache': 'self.cache_handler'}), '(cache=self.cache_handler)\n', (8100, 8126), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4229, 4243), 'crewai.agents.CacheHandler', 'CacheHandler', ([], {}), '()\n', (4241, 4243), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4492, 4498), 'crewai.utilities.I18N', 'I18N', ([], {}), '()\n', (4496, 4498), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((5305, 5397), 'pydantic_core.PydanticCustomError', 'PydanticCustomError', (['"""may_not_set_field"""', '"""This field is not to be set by the user."""', '{}'], {}), "('may_not_set_field',\n    'This field is not to be set by the user.', {})\n", (5324, 5397), False, 'from pydantic_core import PydanticCustomError\n'), ((5957, 6013), 'crewai.utilities.RPMController', 'RPMController', ([], {'max_rpm': 'self.max_rpm', 'logger': 'self._logger'}), '(max_rpm=self.max_rpm, logger=self._logger)\n', (5970, 6013), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((9715, 9805), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'self.llm', 'input_key': '"""input"""', 'memory_key': '"""chat_history"""'}), "(llm=self.llm, input_key='input', memory_key=\n    'chat_history')\n", (9740, 9805), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((10407, 10434), 'crewai.agents.CrewAgentParser', 'CrewAgentParser', ([], {'agent': 'self'}), '(agent=self)\n', (10422, 10434), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((6281, 6339), 'crewai.utilities.token_counter_callback.TokenCalcHandler', 'TokenCalcHandler', (['self.llm.model_name', 'self._token_process'], {}), '(self.llm.model_name, self._token_process)\n', (6297, 6339), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((10502, 10537), 'langchain.agents.agent.RunnableAgent', 'RunnableAgent', ([], {'runnable': 'inner_agent'}), '(runnable=inner_agent)\n', (10515, 10537), False, 'from langchain.agents.agent import RunnableAgent\n'), ((9974, 10010), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, 
tools=tools)\n', (9981, 10010), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((10075, 10111), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (10082, 10111), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((4630, 4674), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MODEL_NAME"""', '"""gpt-4"""'], {}), "('OPENAI_MODEL_NAME', 'gpt-4')\n", (4644, 4674), False, 'import os\n')] | 
| 
	import os
import logging
import hashlib
import PyPDF2
from tqdm import tqdm
from modules.presets import *
from modules.utils import *
from modules.config import local_embedding
def get_documents(file_src):
    from langchain.schema import Document
    from langchain.text_splitter import TokenTextSplitter
    text_splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=30)
    documents = []
    logging.debug("Loading documents...")
    logging.debug(f"file_src: {file_src}")
    for file in file_src:
        filepath = file.name
        filename = os.path.basename(filepath)
        file_type = os.path.splitext(filename)[1]
        logging.info(f"loading file: {filename}")
        texts = None
        try:
            if file_type == ".pdf":
                logging.debug("Loading PDF...")
                try:
                    from modules.pdf_func import parse_pdf
                    from modules.config import advance_docs
                    two_column = advance_docs["pdf"].get("two_column", False)
                    pdftext = parse_pdf(filepath, two_column).text
                except:
                    pdftext = ""
                    with open(filepath, "rb") as pdfFileObj:
                        pdfReader = PyPDF2.PdfReader(pdfFileObj)
                        for page in tqdm(pdfReader.pages):
                            pdftext += page.extract_text()
                texts = [Document(page_content=pdftext,
                                  metadata={"source": filepath})]
            elif file_type == ".docx":
                logging.debug("Loading Word...")
                from langchain.document_loaders import UnstructuredWordDocumentLoader
                loader = UnstructuredWordDocumentLoader(filepath)
                texts = loader.load()
            elif file_type == ".pptx":
                logging.debug("Loading PowerPoint...")
                from langchain.document_loaders import UnstructuredPowerPointLoader
                loader = UnstructuredPowerPointLoader(filepath)
                texts = loader.load()
            elif file_type == ".epub":
                logging.debug("Loading EPUB...")
                from langchain.document_loaders import UnstructuredEPubLoader
                loader = UnstructuredEPubLoader(filepath)
                texts = loader.load()
            elif file_type == ".xlsx":
                logging.debug("Loading Excel...")
                text_list = excel_to_string(filepath)
                texts = []
                for elem in text_list:
                    texts.append(Document(page_content=elem,
                                 metadata={"source": filepath}))
            elif file_type in [".jpg", ".jpeg", ".png", ".heif", ".heic", ".webp", ".bmp", ".gif", ".tiff", ".tif"]:
                raise gr.Warning(i18n("不支持的文件: ") + filename + i18n(",请使用 .pdf, .docx, .pptx, .epub, .xlsx 等文档。"))  # "Unsupported file: <filename>; please use .pdf, .docx, .pptx, .epub, .xlsx documents."
            else:
                logging.debug("Loading text file...")
                from langchain.document_loaders import TextLoader
                loader = TextLoader(filepath, "utf8")
                texts = loader.load()
        except Exception as e:
            import traceback
            logging.error(f"Error loading file: {filename}")
            traceback.print_exc()
        if texts is not None:
            texts = text_splitter.split_documents(texts)
            documents.extend(texts)
    logging.debug("Documents loaded.")
    return documents
def construct_index(
    api_key,
    file_src,
    max_input_size=4096,
    num_outputs=5,
    max_chunk_overlap=20,
    chunk_size_limit=600,
    embedding_limit=None,
    separator=" ",
    load_from_cache_if_possible=True,
):
    from langchain.chat_models import ChatOpenAI
    from langchain.vectorstores import FAISS
    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
    else:
        # Because of a silly design in one of the dependencies, an API KEY must be set here
        os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
    logging.debug(f"api base: {os.environ.get('OPENAI_API_BASE', None)}")
    chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
    embedding_limit = None if embedding_limit == 0 else embedding_limit
    separator = " " if separator == "" else separator
    index_name = get_file_hash(file_src)
    index_path = f"./index/{index_name}"
    if local_embedding:
        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/distiluse-base-multilingual-cased-v2")
    else:
        from langchain.embeddings import OpenAIEmbeddings
        if os.environ.get("OPENAI_API_TYPE", "openai") == "openai":
            embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get(
                "OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key))
        else:
            embeddings = OpenAIEmbeddings(deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
                                          model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure")
    if os.path.exists(index_path) and load_from_cache_if_possible:
        logging.info(i18n("找到了缓存的索引文件,加载中……"))  # "Found a cached index file, loading..."
        return FAISS.load_local(index_path, embeddings)
    else:
        documents = get_documents(file_src)
        logging.debug(i18n("构建索引中……"))  # "Building the index..."
        if documents:
            with retrieve_proxy():
                index = FAISS.from_documents(documents, embeddings)
        else:
            raise Exception(i18n("没有找到任何支持的文档。"))  # "No supported documents were found."
        logging.debug(i18n("索引构建完成!"))  # "Index built!"
        os.makedirs("./index", exist_ok=True)
        index.save_local(index_path)
        logging.debug(i18n("索引已保存至本地!"))  # "Index saved locally!"
        return index
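# Hedged usage sketch (not part of the original module): `construct_index`
# expects `file_src` to be a list of objects exposing a `.name` attribute, as
# Gradio upload components do.  The `_UploadedFile` stand-in and the
# "docs/example.pdf" path below are hypothetical.
if __name__ == "__main__":
    class _UploadedFile:
        def __init__(self, name):
            self.name = name
    files = [_UploadedFile("docs/example.pdf")]
    index = construct_index(api_key=os.environ.get("OPENAI_API_KEY", ""), file_src=files)
    # The returned FAISS store supports similarity search over the split documents.
    print(index.similarity_search("What is this document about?", k=3))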
 | 
	[
  "langchain.document_loaders.UnstructuredWordDocumentLoader",
  "langchain.embeddings.huggingface.HuggingFaceEmbeddings",
  "langchain.vectorstores.FAISS.load_local",
  "langchain.document_loaders.TextLoader",
  "langchain.document_loaders.UnstructuredPowerPointLoader",
  "langchain.document_loaders.UnstructuredEPubLoader",
  "langchain.schema.Document",
  "langchain.vectorstores.FAISS.from_documents",
  "langchain.text_splitter.TokenTextSplitter",
  "langchain.embeddings.OpenAIEmbeddings"
] | 
	[((330, 381), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(30)'}), '(chunk_size=500, chunk_overlap=30)\n', (347, 381), False, 'from langchain.text_splitter import TokenTextSplitter\n'), ((406, 443), 'logging.debug', 'logging.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (419, 443), False, 'import logging\n'), ((448, 486), 'logging.debug', 'logging.debug', (['f"""file_src: {file_src}"""'], {}), "(f'file_src: {file_src}')\n", (461, 486), False, 'import logging\n'), ((3415, 3449), 'logging.debug', 'logging.debug', (['"""Documents loaded."""'], {}), "('Documents loaded.')\n", (3428, 3449), False, 'import logging\n'), ((561, 587), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (577, 587), False, 'import os\n'), ((646, 687), 'logging.info', 'logging.info', (['f"""loading file: {filename}"""'], {}), "(f'loading file: {filename}')\n", (658, 687), False, 'import logging\n'), ((4440, 4539), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/distiluse-base-multilingual-cased-v2"""'}), "(model_name=\n    'sentence-transformers/distiluse-base-multilingual-cased-v2')\n", (4461, 4539), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((5212, 5238), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (5226, 5238), False, 'import os\n'), ((5334, 5374), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['index_path', 'embeddings'], {}), '(index_path, embeddings)\n', (5350, 5374), False, 'from langchain.vectorstores import FAISS\n'), ((5704, 5741), 'os.makedirs', 'os.makedirs', (['"""./index"""'], {'exist_ok': '(True)'}), "('./index', exist_ok=True)\n", (5715, 5741), False, 'import os\n'), ((608, 634), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (624, 634), False, 'import os\n'), ((4627, 4670), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_TYPE"""', '"""openai"""'], {}), "('OPENAI_API_TYPE', 'openai')\n", (4641, 4670), False, 'import os\n'), ((4907, 5176), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': "os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': "os.environ['AZURE_OPENAI_API_KEY']", 'model': "os.environ['AZURE_EMBEDDING_MODEL_NAME']", 'openai_api_base': "os.environ['AZURE_OPENAI_API_BASE_URL']", 'openai_api_type': '"""azure"""'}), "(deployment=os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME'],\n    openai_api_key=os.environ['AZURE_OPENAI_API_KEY'], model=os.environ[\n    'AZURE_EMBEDDING_MODEL_NAME'], openai_api_base=os.environ[\n    'AZURE_OPENAI_API_BASE_URL'], openai_api_type='azure')\n", (4923, 5176), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((774, 805), 'logging.debug', 'logging.debug', (['"""Loading PDF..."""'], {}), "('Loading PDF...')\n", (787, 805), False, 'import logging\n'), ((3204, 3252), 'logging.error', 'logging.error', (['f"""Error loading file: {filename}"""'], {}), "(f'Error loading file: {filename}')\n", (3217, 3252), False, 'import logging\n'), ((3265, 3286), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3284, 3286), False, 'import traceback\n'), ((3993, 4032), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4007, 4032), False, 'import os\n'), ((5549, 5592), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', 
(['documents', 'embeddings'], {}), '(documents, embeddings)\n', (5569, 5592), False, 'from langchain.vectorstores import FAISS\n'), ((1418, 1479), 'langchain.schema.Document', 'Document', ([], {'page_content': 'pdftext', 'metadata': "{'source': filepath}"}), "(page_content=pdftext, metadata={'source': filepath})\n", (1426, 1479), False, 'from langchain.schema import Document\n'), ((1570, 1602), 'logging.debug', 'logging.debug', (['"""Loading Word..."""'], {}), "('Loading Word...')\n", (1583, 1602), False, 'import logging\n'), ((1714, 1754), 'langchain.document_loaders.UnstructuredWordDocumentLoader', 'UnstructuredWordDocumentLoader', (['filepath'], {}), '(filepath)\n', (1744, 1754), False, 'from langchain.document_loaders import UnstructuredWordDocumentLoader\n'), ((4742, 4781), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4756, 4781), False, 'import os\n'), ((4815, 4866), 'os.environ.get', 'os.environ.get', (['"""OPENAI_EMBEDDING_API_KEY"""', 'api_key'], {}), "('OPENAI_EMBEDDING_API_KEY', api_key)\n", (4829, 4866), False, 'import os\n'), ((1055, 1086), 'modules.pdf_func.parse_pdf', 'parse_pdf', (['filepath', 'two_column'], {}), '(filepath, two_column)\n', (1064, 1086), False, 'from modules.pdf_func import parse_pdf\n'), ((1848, 1886), 'logging.debug', 'logging.debug', (['"""Loading PowerPoint..."""'], {}), "('Loading PowerPoint...')\n", (1861, 1886), False, 'import logging\n'), ((1996, 2034), 'langchain.document_loaders.UnstructuredPowerPointLoader', 'UnstructuredPowerPointLoader', (['filepath'], {}), '(filepath)\n', (2024, 2034), False, 'from langchain.document_loaders import UnstructuredPowerPointLoader\n'), ((1246, 1274), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (1262, 1274), False, 'import PyPDF2\n'), ((1311, 1332), 'tqdm.tqdm', 'tqdm', (['pdfReader.pages'], {}), '(pdfReader.pages)\n', (1315, 1332), False, 'from tqdm import tqdm\n'), ((2128, 2160), 'logging.debug', 'logging.debug', (['"""Loading EPUB..."""'], {}), "('Loading EPUB...')\n", (2141, 2160), False, 'import logging\n'), ((2264, 2296), 'langchain.document_loaders.UnstructuredEPubLoader', 'UnstructuredEPubLoader', (['filepath'], {}), '(filepath)\n', (2286, 2296), False, 'from langchain.document_loaders import UnstructuredEPubLoader\n'), ((2390, 2423), 'logging.debug', 'logging.debug', (['"""Loading Excel..."""'], {}), "('Loading Excel...')\n", (2403, 2423), False, 'import logging\n'), ((2936, 2973), 'logging.debug', 'logging.debug', (['"""Loading text file..."""'], {}), "('Loading text file...')\n", (2949, 2973), False, 'import logging\n'), ((3065, 3093), 'langchain.document_loaders.TextLoader', 'TextLoader', (['filepath', '"""utf8"""'], {}), "(filepath, 'utf8')\n", (3075, 3093), False, 'from langchain.document_loaders import TextLoader\n'), ((2577, 2635), 'langchain.schema.Document', 'Document', ([], {'page_content': 'elem', 'metadata': "{'source': filepath}"}), "(page_content=elem, metadata={'source': filepath})\n", (2585, 2635), False, 'from langchain.schema import Document\n')] | 
| 
	import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answer
Thought: you should always think about what to do
Action: Exactly only one word out of: {tool_names}
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
FORMAT_INSTRUCTIONS = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you.  For example, the user might ask: What is the latest news?
Here is an example sequence you can follow:
Thought: I should search online for the latest news.
Action: Search
Action Input: What is the latest news?
Observation: X is going away.  Z is again happening.
Thought: That is interesting, I should search for more information about X and Z and also search about Q.
Action: Search
Action Input: How is X impacting things.  Why is Z happening again, and what are the consequences?
Observation: X is causing Y.  Z may be caused by P and will lead to H.
Thought: I now know the final answer
Final Answer: The latest news is:
* X is going away, and this is caused by Y.
* Z is happening again, and the cause is P and will lead to H.
Overall, X and Z are important problems.
"""
FORMAT_INSTRUCTIONS_PYTHON = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you.  For example, the user might ask: How many rows are in the dataset?
Here is an example sequence you can follow.  You can repeat Thoughts, but as soon as possible you should try to answer the original user question.  Once you can answer the user question, just say: Thought: I now know the final answer
Thought: I should use python_repl_ast tool.
Action: python_repl_ast
Action Input: df.shape
Observation: (25, 10)
Thought: I now know the final answer
Final Answer: There are 25 rows in the dataset.
"""
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)
class H2OMRKLOutputParser(MRKLOutputParser):
    """MRKL Output parser for the chat agent."""
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS
    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        action_match = re.search(regex, text, re.DOTALL)
        if includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )
        elif action_match:
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
            # ensure if its a well formed SQL query we don't remove any trailing " chars
            if tool_input.startswith("SELECT ") is False:
                tool_input = tool_input.strip('"')
            return AgentAction(action, tool_input, text)
        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        else:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")
    @property
    def _type(self) -> str:
        return "mrkl"
class H2OPythonMRKLOutputParser(H2OMRKLOutputParser):
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS_PYTHON
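# Hedged usage sketch (illustrative only): parsing a typical ReAct-style
# completion with the parser defined above.  The sample strings are made up.
if __name__ == "__main__":
    parser = H2OMRKLOutputParser()
    step = parser.parse(
        "Thought: I should search online for the latest news.\n"
        "Action: Search\n"
        "Action Input: What is the latest news?"
    )
    print(step.tool, step.tool_input)  # AgentAction -> "Search" and the query
    done = parser.parse("Thought: I now know the final answer\nFinal Answer: 42")
    print(done.return_values["output"])  # AgentFinish -> "42"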
 | 
	[
  "langchain.schema.AgentAction",
  "langchain.schema.OutputParserException"
] | 
	[((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DOTALL)\n", (3698, 3749), False, 'import re\n'), ((3766, 3928), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n    MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text,\n    send_to_llm=True)\n", (3787, 3928), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((3635, 3672), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'tool_input', 'text'], {}), '(action, tool_input, text)\n', (3646, 3672), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4016, 4103), 're.search', 're.search', (['"""[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)"""', 'text', 're.DOTALL'], {}), "('[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)', text, re.\n    DOTALL)\n", (4025, 4103), False, 'import re\n'), ((4133, 4300), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n    MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text,\n    send_to_llm=True)\n", (4154, 4300), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4403, 4465), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (4424, 4465), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] | 
| 
	import os
import re
import uuid
import cv2
import torch
import requests
import io, base64
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
TOOLS:
------
Visual ChatGPT  has access to the following tools:"""
VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
VISUAL_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist.
You will remember to provide the image file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagination.
The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human. 
Thought: Do I need to use a tool? {agent_scratchpad}"""
ENDPOINT = "http://localhost:7860"
T2IAPI = ENDPOINT + "/controlnet/txt2img"
DETECTAPI = ENDPOINT + "/controlnet/detect"
MODELLIST = ENDPOINT + "/controlnet/model_list"
device = "cpu"
if torch.cuda.is_available():
    device = "cuda"
def readImage(path):
    img = cv2.imread(path)
    retval, buffer = cv2.imencode('.jpg', img)
    b64img = base64.b64encode(buffer).decode("utf-8")
    return b64img
def get_model(pattern='^control_canny.*'):
    r = requests.get(MODELLIST)
    result = r.json()["model_list"]
    for item in result:
        if re.match(pattern, item):
            return item
def do_webui_request(url=T2IAPI, **kwargs):
    reqbody = {
        "prompt": "best quality, extremely detailed",
        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
        "seed": -1,
        "subseed": -1,
        "subseed_strength": 0,
        "batch_size": 1,
        "n_iter": 1,
        "steps": 15,
        "cfg_scale": 7,
        "width": 512,
        "height": 768,
        "restore_faces": True,
        "eta": 0,
        "sampler_index": "Euler a",
        "controlnet_input_images": [],
        "controlnet_module": 'canny',
        "controlnet_model": 'control_canny-fp16 [e3fe7712]',
        "controlnet_guidance": 1.0,
    }
    reqbody.update(kwargs)
    r = requests.post(url, json=reqbody)
    return r.json()
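# Hedged usage sketch (kept as comments; assumes an AUTOMATIC1111 webui with the
# ControlNet extension reachable at ENDPOINT, and a hypothetical "sample.png"):
#
#   resp = do_webui_request(
#       url=T2IAPI,
#       prompt="a watercolor landscape",
#       controlnet_input_images=[readImage("sample.png")],
#       controlnet_model=get_model(pattern="^control_canny.*"),
#   )
#   with open("image/out.png", "wb") as f:
#       f.write(base64.b64decode(resp["images"][0]))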
    
def cut_dialogue_history(history_memory, keep_last_n_words=500):
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"hitory_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    else:
        paragraphs = history_memory.split('\n')
        last_n_tokens = n_tokens
        while last_n_tokens >= keep_last_n_words:
            last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
            paragraphs = paragraphs[1:]
        return '\n' + '\n'.join(paragraphs)
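# Illustrative note: whole leading paragraphs are dropped until fewer than
# `keep_last_n_words` whitespace-separated tokens remain, e.g.
#   cut_dialogue_history("a b c\nd e f\ng h", keep_last_n_words=4)  ->  "\ng h"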
def get_new_image_name(org_img_name, func_name="update"):
    head_tail = os.path.split(org_img_name)
    head = head_tail[0]
    tail = head_tail[1]
    name_split = tail.split('.')[0].split('_')
    this_new_uuid = str(uuid.uuid4())[0:4]
    if len(name_split) == 1:
        most_org_file_name = name_split[0]
        recent_prev_file_name = name_split[0]
        new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
    else:
        assert len(name_split) == 4
        most_org_file_name = name_split[3]
        recent_prev_file_name = name_split[0]
        new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
    return os.path.join(head, new_file_name)
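# Illustrative note: generated names follow the pattern
# "<4-char uuid>_<func_name>_<recent_prev>_<most_org>.png", e.g.
#   get_new_image_name("image/cat.png", "canny2image")
#   -> "image/1a2b_canny2image_cat_cat.png"   (the uuid prefix varies per call)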
class MaskFormer:
    def __init__(self, device):
        self.device = device
        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
    def inference(self, image_path, text):
        threshold = 0.5
        min_area = 0.02
        padding = 20
        original_image = Image.open(image_path)
        image = original_image.resize((512, 512))
        inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt",).to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
        area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
        if area_ratio < min_area:
            return None
        true_indices = np.argwhere(mask)
        mask_array = np.zeros_like(mask, dtype=bool)
        for idx in true_indices:
            padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
            mask_array[padded_slice] = True
        visual_mask = (mask_array * 255).astype(np.uint8)
        image_mask = Image.fromarray(visual_mask)
        return image_mask.resize(image.size)
    
# class ImageEditing:
#     def __init__(self, device):
#         print("Initializing StableDiffusionInpaint to %s" % device)
#         self.device = device
#         self.mask_former = MaskFormer(device=self.device)
#         # self.inpainting = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",).to(device)
#     def remove_part_of_image(self, input):
#         image_path, to_be_removed_txt = input.split(",")
#         print(f'remove_part_of_image: to_be_removed {to_be_removed_txt}')
#         return self.replace_part_of_image(f"{image_path},{to_be_removed_txt},background")
#     def replace_part_of_image(self, input):
#         image_path, to_be_replaced_txt, replace_with_txt = input.split(",")
#         print(f'replace_part_of_image: replace_with_txt {replace_with_txt}')
#         mask_image = self.mask_former.inference(image_path, to_be_replaced_txt)
#         buffered = io.BytesIO()
#         mask_image.save(buffered, format="JPEG")
#         resp = do_webui_request(
#             url=ENDPOINT + "/sdapi/v1/img2img",
#             init_images=[readImage(image_path)],
#             mask=base64.b64encode(buffered.getvalue()).decode("utf-8"),
#             prompt=replace_with_txt,
#         )
#         updated_image_path = get_new_image_name(image_path, func_name="replace-something")
#         with open(updated_image_path, 'wb') as f:
#             f.write(base64.b64decode(resp['images'][0]))
#         return updated_image_path
# class Pix2Pix:
#     def __init__(self, device):
#         print("Initializing Pix2Pix to %s" % device)
#         self.device = device
#         self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None).to(device)
#         self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
#     def inference(self, inputs):
#         """Change style of image."""
#         print("===>Starting Pix2Pix Inference")
#         image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
#         original_image = Image.open(image_path)
#         image = self.pipe(instruct_text,image=original_image,num_inference_steps=40,image_guidance_scale=1.2,).images[0]
#         updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
#         image.save(updated_image_path)
#         return updated_image_path
class T2I:
    def __init__(self, device):
        print("Initializing T2I to %s" % device)
        self.device = device
        self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
        
    def inference(self, text):
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
        print(f'{text} refined to {refined_text}')
        resp = do_webui_request(
            url=ENDPOINT + "/sdapi/v1/txt2img",
            prompt=refined_text,
        )
        with open(image_filename, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
        return image_filename
class ImageCaptioning:
    def __init__(self, device):
        print("Initializing ImageCaptioning to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        return captions
    
    
class image2canny:
    def inference(self, inputs):
        print("===>Starting image2canny Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="segmentation",
        )
        updated_image_path = get_new_image_name(inputs, func_name="edge")
        image.save(updated_image_path)
        return updated_image_path
class canny2image:
    def inference(self, inputs):
        print("===>Starting canny2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_canny.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="canny2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2line:
    def inference(self, inputs):
        print("===>Starting image2hough Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="mlsd",
        )
        updated_image_path = get_new_image_name(inputs, func_name="line-of")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class line2image:
    def inference(self, inputs):
        print("===>Starting line2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_mlsd.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="line2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2hed:
    def inference(self, inputs):
        print("===>Starting image2hed Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="hed",
        )
        updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class hed2image:
    def inference(self, inputs):
        print("===>Starting hed2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_hed.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="hed2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2scribble:
    def inference(self, inputs):
        print("===>Starting image2scribble Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="scribble",
        )
        updated_image_path = get_new_image_name(inputs, func_name="scribble")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class scribble2image:
    def inference(self, inputs):
        print("===>Starting seg2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_scribble.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
    
    
class image2pose:
    def inference(self, inputs):
        print("===>Starting image2pose Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="openpose",
        )
        updated_image_path = get_new_image_name(inputs, func_name="human-pose")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class pose2image:
    def inference(self, inputs):
        print("===>Starting pose2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_openpose.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="pose2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2seg:
    def inference(self, inputs):
        print("===>Starting image2seg Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="segmentation",
        )
        updated_image_path = get_new_image_name(inputs, func_name="segmentation")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class seg2image:
    def inference(self, inputs):
        print("===>Starting seg2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_seg.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="segment2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2depth:
    def inference(self, inputs):
        print("===>Starting image2depth Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="depth",
        )
        updated_image_path = get_new_image_name(inputs, func_name="depth")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class depth2image:
    def inference(self, inputs):
        print("===>Starting depth2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="depth",
            controlnet_model=get_model(pattern='^control_depth.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="depth2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2normal:
    def inference(self, inputs):
        print("===>Starting image2 normal Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="normal",
        )
        updated_image_path = get_new_image_name(inputs, func_name="normal-map")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class normal2image:
    def inference(self, inputs):
        print("===>Starting normal2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="normal",
            controlnet_model=get_model(pattern='^control_normal.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="normal2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class BLIPVQA:
    def __init__(self, device):
        print("Initializing BLIP VQA to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
        self.model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(self.device)
    def get_answer_from_question_and_image(self, inputs):
        image_path, question = inputs.split(",")
        raw_image = Image.open(image_path).convert('RGB')
        print(F'BLIPVQA :question :{question}')
        inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        answer = self.processor.decode(out[0], skip_special_tokens=True)
        return answer
class ConversationBot:
    def __init__(self):
        print("Initializing VisualChatGPT")
        # self.edit = ImageEditing(device=device)
        self.i2t = ImageCaptioning(device=device)
        self.t2i = T2I(device=device)
        self.image2canny = image2canny()
        self.canny2image = canny2image()
        self.image2line = image2line()
        self.line2image = line2image()
        self.image2hed = image2hed()
        self.hed2image = hed2image()
        self.image2scribble = image2scribble()
        self.scribble2image = scribble2image()
        self.image2pose = image2pose()
        self.pose2image = pose2image()
        self.BLIPVQA = BLIPVQA(device=device)
        self.image2seg = image2seg()
        self.seg2image = seg2image()
        self.image2depth = image2depth()
        self.depth2image = depth2image()
        self.image2normal = image2normal()
        self.normal2image = normal2image()
        # self.pix2pix = Pix2Pix(device="cuda:3")
        self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
        self.tools = [
            Tool(name="Get Photo Description", func=self.i2t.inference,
                 description="useful when you want to know what is inside the photo. receives image_path as input. "
                             "The input to this tool should be a string, representing the image_path. "),
            Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
                 description="useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
                             "The input to this tool should be a string, representing the text used to generate image. "),
            # Tool(name="Remove Something From The Photo", func=self.edit.remove_part_of_image,
            #      description="useful when you want to remove and object or something from the photo from its description or location. "
            #                  "The input to this tool should be a comma seperated string of two, representing the image_path and the object need to be removed. "),
            # Tool(name="Replace Something From The Photo", func=self.edit.replace_part_of_image,
            #      description="useful when you want to replace an object from the object description or location with another object from its description. "
            #                  "The input to this tool should be a comma seperated string of three, representing the image_path, the object to be replaced, the object to be replaced with "),
            # Tool(name="Instruct Image Using Text", func=self.pix2pix.inference,
            #      description="useful when you want to the style of the image to be like the text. like: make it look like a painting. or make it like a robot. "
            #                  "The input to this tool should be a comma seperated string of two, representing the image_path and the text. "),
            Tool(name="Answer Question About The Image", func=self.BLIPVQA.get_answer_from_question_and_image,
                 description="useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the question"),
            Tool(name="Edge Detection On Image", func=self.image2canny.inference,
                 description="useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Canny Image", func=self.canny2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "),
            Tool(name="Line Detection On Image", func=self.image2line.inference,
                 description="useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Line Image", func=self.line2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "),
            Tool(name="Hed Detection On Image", func=self.image2hed.inference,
                 description="useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Soft Hed Boundary Image", func=self.hed2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Segmentation On Image", func=self.image2seg.inference,
                 description="useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Segmentations", func=self.seg2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Predict Depth On Image", func=self.image2depth.inference,
                 description="useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Depth",  func=self.depth2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Predict Normal Map On Image", func=self.image2normal.inference,
                 description="useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Normal Map", func=self.normal2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Sketch Detection On Image", func=self.image2scribble.inference,
                 description="useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Sketch Image", func=self.scribble2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Pose Detection On Image", func=self.image2pose.inference,
                 description="useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Pose Image", func=self.pose2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description")]
        
    def init_langchain(self, openai_api_key):
        self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
        self.agent = initialize_agent(
            self.tools,
            self.llm,
            agent="conversational-react-description",
            verbose=True,
            memory=self.memory,
            return_intermediate_steps=True,
            agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}
        )
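    # Handle one text turn: trim the agent's memory buffer to roughly 500 words, run the
    # agent on the input, then mark up any "image/xxx.png" paths in the answer so the
    # Gradio chatbot can render the referenced images.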
    def run_text(self, openai_api_key, text, state):
        if not hasattr(self, "agent"):
            self.init_langchain(openai_api_key)
        print("===============Running run_text =============")
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        print("======>Current memory:\n %s" % self.agent.memory)
        response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
        state = state + [(text, response)]
        print("Outputs:", state)
        return state, state
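    # Handle an uploaded image: save a resized RGB copy under image/, caption it with the
    # captioning tool (self.i2t), and append a synthetic Human/AI exchange to the agent's
    # memory so later turns can refer to the file by name.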
    def run_image(self, openai_api_key, image, state, txt):
        if not hasattr(self, "agent"):
            self.init_langchain(openai_api_key)
        print("===============Running run_image =============")
        print("Inputs:", image, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        print("======>Auto Resize Image...")
        img = Image.open(image.name)
        width, height = img.size
        ratio = min(512 / width, 512 / height)
        width_new, height_new = (round(width * ratio), round(height * ratio))
        img = img.resize((width_new, height_new))
        img = img.convert('RGB')
        img.save(image_filename, "PNG")
        print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
        description = self.i2t.inference(image_filename)
        Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
                       "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
        AI_prompt = "Received.  "
        self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
        print("======>Current memory:\n %s" % self.agent.memory)
        state = state + [(f"*{image_filename}*", AI_prompt)]
        print("Outputs:", state)
        return state, state, txt + ' ' + image_filename + ' '
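# Minimal Gradio front end: an OpenAI API key box, a chatbot pane, a text input, an image
# upload button and a clear button, all wired to the ConversationBot handlers above.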
if __name__ == '__main__':
    os.makedirs("image/", exist_ok=True)
    bot = ConversationBot()
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        openai_api_key = gr.Textbox(type="password", label="Enter your OpenAI API key here")       
        chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
            with gr.Column(scale=0.15, min_width=0):
                clear = gr.Button("Clear️")
            with gr.Column(scale=0.15, min_width=0):
                btn = gr.UploadButton("Upload", file_types=["image"])
                
        txt.submit(bot.run_text, [openai_api_key, txt, state], [chatbot, state])
        txt.submit(lambda: "", None, txt)
        btn.upload(bot.run_image, [openai_api_key, btn, state, txt], [chatbot, state, txt])
        clear.click(bot.memory.clear)
        clear.click(lambda: [], None, chatbot)
        clear.click(lambda: [], None, state)
    
    
    demo.launch(server_name="0.0.0.0", server_port=7864) | 
	[
  "langchain.llms.openai.OpenAI",
  "langchain.agents.tools.Tool",
  "langchain.chains.conversation.memory.ConversationBufferMemory",
  "langchain.agents.initialize.initialize_agent"
] | 
	[((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg', img)\n", (3941, 3954), False, 'import cv2\n'), ((4079, 4102), 'requests.get', 'requests.get', (['MODELLIST'], {}), '(MODELLIST)\n', (4091, 4102), False, 'import requests\n'), ((5000, 5032), 'requests.post', 'requests.post', (['url'], {'json': 'reqbody'}), '(url, json=reqbody)\n', (5013, 5032), False, 'import requests\n'), ((5695, 5722), 'os.path.split', 'os.path.split', (['org_img_name'], {}), '(org_img_name)\n', (5708, 5722), False, 'import os\n'), ((6361, 6394), 'os.path.join', 'os.path.join', (['head', 'new_file_name'], {}), '(head, new_file_name)\n', (6373, 6394), False, 'import os\n'), ((35612, 35648), 'os.makedirs', 'os.makedirs', (['"""image/"""'], {'exist_ok': '(True)'}), "('image/', exist_ok=True)\n", (35623, 35648), False, 'import os\n'), ((4174, 4197), 're.match', 're.match', (['pattern', 'item'], {}), '(pattern, item)\n', (4182, 4197), False, 'import re\n'), ((6500, 6562), 'transformers.CLIPSegProcessor.from_pretrained', 'CLIPSegProcessor.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6532, 6562), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((6807, 6829), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6817, 6829), False, 'from PIL import Image\n'), ((7306, 7323), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7317, 7323), True, 'import numpy as np\n'), ((7345, 7376), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (7358, 7376), True, 'import numpy as np\n'), ((7624, 7652), 'PIL.Image.fromarray', 'Image.fromarray', (['visual_mask'], {}), '(visual_mask)\n', (7639, 7652), False, 'from PIL import Image\n'), ((10312, 10384), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10341, 10384), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10418, 10497), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10454, 10497), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10535, 10655), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n    text_refine_tokenizer, device=self.device)\n", (10543, 10655), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((11412, 11482), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (11441, 11482), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21185, 
21242), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21214, 21242), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((22789, 22861), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (22813, 22861), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((32769, 32821), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), '(temperature=0, openai_api_key=openai_api_key)\n', (32775, 32821), False, 'from langchain.llms.openai import OpenAI\n'), ((32843, 33146), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions':\n    VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    VISUAL_CHATGPT_PREFIX, 'format_instructions':\n    VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX})\n", (32859, 33146), False, 'from langchain.agents.initialize import initialize_agent\n'), ((34415, 34437), 'PIL.Image.open', 'Image.open', (['image.name'], {}), '(image.name)\n', (34425, 34437), False, 'from PIL import Image\n'), ((35686, 35742), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (35695, 35742), True, 'import gradio as gr\n'), ((35777, 35844), 'gradio.Textbox', 'gr.Textbox', ([], {'type': '"""password"""', 'label': '"""Enter your OpenAI API key here"""'}), "(type='password', label='Enter your OpenAI API key here')\n", (35787, 35844), True, 'import gradio as gr\n'), ((35870, 35923), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""Visual ChatGPT"""'}), "(elem_id='chatbot', label='Visual ChatGPT')\n", (35880, 35923), True, 'import gradio as gr\n'), ((35940, 35952), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (35948, 35952), True, 'import gradio as gr\n'), ((3968, 3992), 'base64.b64encode', 'base64.b64encode', (['buffer'], {}), '(buffer)\n', (3984, 3992), False, 'import io, base64\n'), ((5842, 5854), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5852, 5854), False, 'import uuid\n'), ((7010, 7025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7023, 7025), False, 'import torch\n'), ((22897, 23139), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n    'useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. 
'\n    )\n", (22901, 23139), False, 'from langchain.agents.tools import Tool\n'), ((23192, 23572), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n    description=\n    'useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n    )\n", (23196, 23572), False, 'from langchain.agents.tools import Tool\n'), ((24857, 25269), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Answer Question About The Image"""', 'func': 'self.BLIPVQA.get_answer_from_question_and_image', 'description': '"""useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question"""'}), "(name='Answer Question About The Image', func=self.BLIPVQA.\n    get_answer_from_question_and_image, description=\n    'useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question'\n    )\n", (24861, 25269), False, 'from langchain.agents.tools import Tool\n'), ((25317, 25688), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Edge Detection On Image"""', 'func': 'self.image2canny.inference', 'description': '"""useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Edge Detection On Image', func=self.image2canny.inference,\n    description=\n    'useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (25321, 25688), False, 'from langchain.agents.tools import Tool\n'), ((25737, 26224), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Canny Image"""', 'func': 'self.canny2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. 
"""'}), "(name='Generate Image Condition On Canny Image', func=self.canny2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n    )\n", (25741, 26224), False, 'from langchain.agents.tools import Tool\n'), ((26272, 26685), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Line Detection On Image"""', 'func': 'self.image2line.inference', 'description': '"""useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Line Detection On Image', func=self.image2line.inference,\n    description=\n    'useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (26276, 26685), False, 'from langchain.agents.tools import Tool\n'), ((26734, 27239), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Line Image"""', 'func': 'self.line2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. """'}), "(name='Generate Image Condition On Line Image', func=self.line2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n    )\n", (26738, 27239), False, 'from langchain.agents.tools import Tool\n'), ((27287, 27703), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Hed Detection On Image"""', 'func': 'self.image2hed.inference', 'description': '"""useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Hed Detection On Image', func=self.image2hed.inference,\n    description=\n    'useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. 
The input to this tool should be a string, representing the image_path'\n    )\n", (27291, 27703), False, 'from langchain.agents.tools import Tool\n'), ((27752, 28273), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Soft Hed Boundary Image"""', 'func': 'self.hed2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Soft Hed Boundary Image', func=self.\n    hed2image.inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (27756, 28273), False, 'from langchain.agents.tools import Tool\n'), ((28321, 28650), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Segmentation On Image"""', 'func': 'self.image2seg.inference', 'description': '"""useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Segmentation On Image', func=self.image2seg.inference,\n    description=\n    'useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path'\n    )\n", (28325, 28650), False, 'from langchain.agents.tools import Tool\n'), ((28699, 29195), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Segmentations"""', 'func': 'self.seg2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Segmentations', func=self.seg2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (28703, 29195), False, 'from langchain.agents.tools import Tool\n'), ((29243, 29580), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Depth On Image"""', 'func': 'self.image2depth.inference', 'description': '"""useful when you want to detect depth of the image. 
like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Depth On Image', func=self.image2depth.inference,\n    description=\n    'useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path'\n    )\n", (29247, 29580), False, 'from langchain.agents.tools import Tool\n'), ((29629, 30104), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Depth"""', 'func': 'self.depth2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Depth', func=self.depth2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (29633, 30104), False, 'from langchain.agents.tools import Tool\n'), ((30153, 30461), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Normal Map On Image"""', 'func': 'self.image2normal.inference', 'description': '"""useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Normal Map On Image', func=self.image2normal.inference,\n    description=\n    'useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (30157, 30461), False, 'from langchain.agents.tools import Tool\n'), ((30510, 30990), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Normal Map"""', 'func': 'self.normal2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Normal Map', func=self.normal2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (30514, 30990), False, 'from langchain.agents.tools import Tool\n'), ((31038, 31384), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sketch Detection On Image"""', 'func': 'self.image2scribble.inference', 'description': '"""useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Sketch Detection On Image', func=self.image2scribble.inference,\n    description=\n    'useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path'\n    )\n", (31042, 31384), False, 'from langchain.agents.tools import Tool\n'), ((31433, 31791), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Sketch Image"""', 'func': 'self.scribble2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Sketch Image', func=self.\n    scribble2image.inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (31437, 31791), False, 'from langchain.agents.tools import Tool\n'), ((31839, 32151), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Pose Detection On Image"""', 'func': 'self.image2pose.inference', 'description': '"""useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Pose Detection On Image', func=self.image2pose.inference,\n    description=\n    'useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path'\n    )\n", (31843, 32151), False, 'from langchain.agents.tools import Tool\n'), ((32200, 32659), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Pose Image"""', 'func': 'self.pose2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Pose Image', func=self.pose2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (32204, 32659), False, 'from langchain.agents.tools import Tool\n'), ((35966, 35974), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (35972, 35974), True, 'import gradio as gr\n'), ((6584, 6657), 'transformers.CLIPSegForImageSegmentation.from_pretrained', 'CLIPSegForImageSegmentation.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6627, 6657), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((7172, 7189), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7183, 7189), True, 'import numpy as np\n'), ((11089, 11124), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (11105, 11124), False, 'import io, base64\n'), ((11504, 11594), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n    'Salesforce/blip-image-captioning-base')\n", (11548, 11594), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((12911, 12946), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (12927, 12946), False, 'import io, base64\n'), ((13400, 13435), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (13416, 13435), False, 'import io, base64\n'), ((14063, 14098), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14079, 14098), False, 'import io, base64\n'), ((14553, 14588), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14569, 14588), False, 'import io, base64\n'), ((15122, 15157), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15138, 15157), False, 'import io, base64\n'), ((15623, 15658), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15639, 15658), False, 'import io, base64\n'), ((16297, 16332), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16313, 16332), False, 'import io, base64\n'), ((16800, 16835), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16816, 16835), False, 'import io, base64\n'), ((17467, 17502), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17483, 17502), False, 'import io, base64\n'), ((17966, 18001), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17982, 18001), False, 'import io, base64\n'), ((18629, 18664), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (18645, 18664), False, 'import io, base64\n'), ((19118, 19153), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19134, 19153), False, 'import io, base64\n'), ((19786, 19821), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19802, 19821), False, 'import io, base64\n'), ((20284, 20319), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (20300, 20319), False, 'import io, base64\n'), ((20957, 20992), 'base64.b64decode', 
'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (20973, 20992), False, 'import io, base64\n'), ((21264, 21332), 'transformers.BlipForQuestionAnswering.from_pretrained', 'BlipForQuestionAnswering.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21304, 21332), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21477, 21499), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (21487, 21499), False, 'from PIL import Image\n'), ((35993, 36013), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (36002, 36013), True, 'import gradio as gr\n'), ((36168, 36202), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36177, 36202), True, 'import gradio as gr\n'), ((36228, 36247), 'gradio.Button', 'gr.Button', (['"""Clear️"""'], {}), "('Clear️')\n", (36237, 36247), True, 'import gradio as gr\n'), ((36265, 36299), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36274, 36299), True, 'import gradio as gr\n'), ((36323, 36370), 'gradio.UploadButton', 'gr.UploadButton', (['"""Upload"""'], {'file_types': "['image']"}), "('Upload', file_types=['image'])\n", (36338, 36370), True, 'import gradio as gr\n'), ((11676, 11698), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (11686, 11698), False, 'from PIL import Image\n'), ((10742, 10754), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10752, 10754), False, 'import uuid\n'), ((34327, 34339), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34337, 34339), False, 'import uuid\n'), ((36037, 36132), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n    'Enter text and press enter, or upload an image')\n", (36047, 36132), True, 'import gradio as gr\n'), ((7085, 7110), 'torch.sigmoid', 'torch.sigmoid', (['outputs[0]'], {}), '(outputs[0])\n', (7098, 7110), False, 'import torch\n')] | 
| 
	# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# coding: utf-8
import os
import gradio as gr
import random
import torch
import cv2
import re
import uuid
from PIL import Image, ImageDraw, ImageOps, ImageFont
import math
import numpy as np
import argparse
import inspect
import tempfile
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
from diffusers import EulerAncestralDiscreteScheduler
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
# Grounding DINO
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util import box_ops
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
# segment anything
from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator
import cv2
import numpy as np
import matplotlib.pyplot as plt
import wget
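# Agent prompt templates. The English prefix, format instructions and suffix below are
# passed to initialize_agent via agent_kwargs; the *_CN constants are the Chinese-language
# variants of the same prompts.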
VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
TOOLS:
------
Visual ChatGPT  has access to the following tools:"""
VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
VISUAL_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist.
You will remember to provide the image file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagination.
The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human. 
Thought: Do I need to use a tool? {agent_scratchpad} Let's think step by step.
"""
VISUAL_CHATGPT_PREFIX_CN = """Visual ChatGPT 旨在能够协助完成范围广泛的文本和视觉相关任务,从回答简单的问题到提供对广泛主题的深入解释和讨论。 Visual ChatGPT 能够根据收到的输入生成类似人类的文本,使其能够进行听起来自然的对话,并提供连贯且与手头主题相关的响应。
Visual ChatGPT 能够处理和理解大量文本和图像。作为一种语言模型,Visual ChatGPT 不能直接读取图像,但它有一系列工具来完成不同的视觉任务。每张图片都会有一个文件名,格式为“image/xxx.png”,Visual ChatGPT可以调用不同的工具来间接理解图片。在谈论图片时,Visual ChatGPT 对文件名的要求非常严格,绝不会伪造不存在的文件。在使用工具生成新的图像文件时,Visual ChatGPT也知道图像可能与用户需求不一样,会使用其他视觉问答工具或描述工具来观察真实图像。 Visual ChatGPT 能够按顺序使用工具,并且忠于工具观察输出,而不是伪造图像内容和图像文件名。如果生成新图像,它将记得提供上次工具观察的文件名。
Human 可能会向 Visual ChatGPT 提供带有描述的新图形。描述帮助 Visual ChatGPT 理解这个图像,但 Visual ChatGPT 应该使用工具来完成以下任务,而不是直接从描述中想象。有些工具将会返回英文描述,但你对用户的聊天应当采用中文。
总的来说,Visual ChatGPT 是一个强大的可视化对话辅助工具,可以帮助处理范围广泛的任务,并提供关于范围广泛的主题的有价值的见解和信息。
工具列表:
------
Visual ChatGPT 可以使用这些工具:"""
VISUAL_CHATGPT_FORMAT_INSTRUCTIONS_CN = """用户使用中文和你进行聊天,但是工具的参数应当使用英文。如果要调用工具,你必须遵循如下格式:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
当你不再需要继续调用工具,而是对观察结果进行总结回复时,你必须使用如下格式:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
VISUAL_CHATGPT_SUFFIX_CN = """你对文件名的正确性非常严格,而且永远不会伪造不存在的文件。
开始!
因为Visual ChatGPT是一个文本语言模型,必须使用工具去观察图片而不是依靠想象。
推理想法和观察结果只对Visual ChatGPT可见,需要记得在最终回复时把重要的信息重复给用户,你只能给用户返回中文句子。我们一步一步思考。在你使用工具时,工具的参数只能是英文。
聊天历史:
{chat_history}
新输入: {input}
Thought: Do I need to use a tool? {agent_scratchpad}
"""
os.makedirs('image', exist_ok=True)
def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    return seed
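# Decorator that attaches a tool name and description to an inference method; these
# attributes supply the metadata used to expose the method as a LangChain Tool.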
def prompts(name, description):
    def decorator(func):
        func.name = name
        func.description = description
        return func
    return decorator
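# Paste the original image into the centre of the generated canvas and feather the seam:
# a Gaussian-derived weight kernel keeps the original pixels in the interior and blends
# towards the generated pixels over `steps` border rows/columns.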
def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
    new_size = new_image.size
    old_size = old_image.size
    easy_img = np.array(new_image)
    gt_img_array = np.array(old_image)
    pos_w = (new_size[0] - old_size[0]) // 2
    pos_h = (new_size[1] - old_size[1]) // 2
    kernel_h = cv2.getGaussianKernel(old_size[1], old_size[1] * sigma)
    kernel_w = cv2.getGaussianKernel(old_size[0], old_size[0] * sigma)
    kernel = np.multiply(kernel_h, np.transpose(kernel_w))
    kernel[steps:-steps, steps:-steps] = 1
    kernel[:steps, :steps] = kernel[:steps, :steps] / kernel[steps - 1, steps - 1]
    kernel[:steps, -steps:] = kernel[:steps, -steps:] / kernel[steps - 1, -(steps)]
    kernel[-steps:, :steps] = kernel[-steps:, :steps] / kernel[-steps, steps - 1]
    kernel[-steps:, -steps:] = kernel[-steps:, -steps:] / kernel[-steps, -steps]
    kernel = np.expand_dims(kernel, 2)
    kernel = np.repeat(kernel, 3, 2)
    weight = np.linspace(0, 1, steps)
    top = np.expand_dims(weight, 1)
    top = np.repeat(top, old_size[0] - 2 * steps, 1)
    top = np.expand_dims(top, 2)
    top = np.repeat(top, 3, 2)
    weight = np.linspace(1, 0, steps)
    down = np.expand_dims(weight, 1)
    down = np.repeat(down, old_size[0] - 2 * steps, 1)
    down = np.expand_dims(down, 2)
    down = np.repeat(down, 3, 2)
    weight = np.linspace(0, 1, steps)
    left = np.expand_dims(weight, 0)
    left = np.repeat(left, old_size[1] - 2 * steps, 0)
    left = np.expand_dims(left, 2)
    left = np.repeat(left, 3, 2)
    weight = np.linspace(1, 0, steps)
    right = np.expand_dims(weight, 0)
    right = np.repeat(right, old_size[1] - 2 * steps, 0)
    right = np.expand_dims(right, 2)
    right = np.repeat(right, 3, 2)
    kernel[:steps, steps:-steps] = top
    kernel[-steps:, steps:-steps] = down
    kernel[steps:-steps, :steps] = left
    kernel[steps:-steps, -steps:] = right
    pt_gt_img = easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]]
    gaussian_gt_img = kernel * gt_img_array + (1 - kernel) * pt_gt_img  # gt img with blur img
    gaussian_gt_img = gaussian_gt_img.astype(np.int64)
    easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]] = gaussian_gt_img
    gaussian_img = Image.fromarray(easy_img)
    return gaussian_img
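# Trim the agent's chat-history buffer to roughly keep_last_n_words words by dropping the
# oldest newline-separated paragraphs first.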
def cut_dialogue_history(history_memory, keep_last_n_words=500):
    if history_memory is None or len(history_memory) == 0:
        return history_memory
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    paragraphs = history_memory.split('\n')
    last_n_tokens = n_tokens
    while last_n_tokens >= keep_last_n_words:
        last_n_tokens -= len(paragraphs[0].split(' '))
        paragraphs = paragraphs[1:]
    return '\n' + '\n'.join(paragraphs)
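# Build a chained output filename "<uuid>_<func_name>_<recent_prev>_<most_org>.png" so each
# processed image records both the tool that produced it and the original source image name.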
def get_new_image_name(org_img_name, func_name="update"):
    head_tail = os.path.split(org_img_name)
    head = head_tail[0]
    tail = head_tail[1]
    name_split = tail.split('.')[0].split('_')
    this_new_uuid = str(uuid.uuid4())[:4]
    if len(name_split) == 1:
        most_org_file_name = name_split[0]
    else:
        assert len(name_split) == 4
        most_org_file_name = name_split[3]
    recent_prev_file_name = name_split[0]
    new_file_name = f'{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.png'
    return os.path.join(head, new_file_name)
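# Most of the tool classes below follow the same pattern: the constructor loads the
# required model(s) onto the given device, and a single @prompts-decorated inference
# method does the work, taking an image_path (optionally followed by a comma and a text
# instruction) and returning either a new image path or a text result.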
class InstructPix2Pix:
    def __init__(self, device):
        print(f"Initializing InstructPix2Pix to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
       
        self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix",
                                                                           safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
                                                                           torch_dtype=self.torch_dtype).to(device)
        self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
    @prompts(name="Instruct Image Using Text",
             description="useful when you want to the style of the image to be like the text. "
                         "like: make it look like a painting. or make it like a robot. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the text. ")
    def inference(self, inputs):
        """Change style of image."""
        print("===>Starting InstructPix2Pix Inference")
        image_path, text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        original_image = Image.open(image_path)
        image = self.pipe(text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
        image.save(updated_image_path)
        print(f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class Text2Image:
    def __init__(self, device):
        print(f"Initializing Text2Image to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
                                                            torch_dtype=self.torch_dtype)
        self.pipe.to(device)
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                        'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image From User Input Text",
             description="useful when you want to generate an image from a user input text and save it to a file. "
                         "like: generate an image of an object or something, or generate an image that includes some objects. "
                         "The input to this tool should be a string, representing the text used to generate image. ")
    def inference(self, text):
        image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
        prompt = text + ', ' + self.a_prompt
        image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0]
        image.save(image_filename)
        print(
            f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}")
        return image_filename
class ImageCaptioning:
    def __init__(self, device):
        print(f"Initializing ImageCaptioning to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device)
    @prompts(name="Get Photo Description",
             description="useful when you want to know what is inside the photo. receives image_path as input. "
                         "The input to this tool should be a string, representing the image_path. ")
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        print(f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}")
        return captions
class Image2Canny:
    def __init__(self, device):
        print("Initializing Image2Canny")
        self.low_threshold = 100
        self.high_threshold = 200
    @prompts(name="Edge Detection On Image",
             description="useful when you want to detect the edge of the image. "
                         "like: detect the edges of this image, or canny detection on image, "
                         "or perform edge detection on this image, or detect the canny image of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        image = np.array(image)
        canny = cv2.Canny(image, self.low_threshold, self.high_threshold)
        canny = canny[:, :, None]
        canny = np.concatenate([canny, canny, canny], axis=2)
        canny = Image.fromarray(canny)
        updated_image_path = get_new_image_name(inputs, func_name="edge")
        canny.save(updated_image_path)
        print(f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text: {updated_image_path}")
        return updated_image_path
class CannyText2Image:
    def __init__(self, device):
        print(f"Initializing CannyText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-canny",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                            'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Canny Image",
             description="useful when you want to generate a new real image from both the user description and a canny image."
                         " like: generate a real image of a object or something from this canny image,"
                         " or generate a new real image of a object or something from this edge image. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description. ")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="canny2image")
        image.save(updated_image_path)
        print(f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, "
              f"Output Text: {updated_image_path}")
        return updated_image_path
class Image2Line:
    def __init__(self, device):
        print("Initializing Image2Line")
        self.detector = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
    @prompts(name="Line Detection On Image",
             description="useful when you want to detect the straight line of the image. "
                         "like: detect the straight lines of this image, or straight line detection on image, "
                         "or perform straight line detection on this image, or detect the straight line image of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        mlsd = self.detector(image)
        updated_image_path = get_new_image_name(inputs, func_name="line-of")
        mlsd.save(updated_image_path)
        print(f"\nProcessed Image2Line, Input Image: {inputs}, Output Line: {updated_image_path}")
        return updated_image_path
class LineText2Image:
    def __init__(self, device):
        print(f"Initializing LineText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-mlsd",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype
        )
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                            'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Line Image",
             description="useful when you want to generate a new real image from both the user description "
                         "and a straight line image. "
                         "like: generate a real image of a object or something from this straight line image, "
                         "or generate a new real image of a object or something from this straight lines. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description. ")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="line2image")
        image.save(updated_image_path)
        print(f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text: {instruct_text}, "
              f"Output Text: {updated_image_path}")
        return updated_image_path
class Image2Hed:
    def __init__(self, device):
        print("Initializing Image2Hed")
        self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
    @prompts(name="Hed Detection On Image",
             description="useful when you want to detect the soft hed boundary of the image. "
                         "like: detect the soft hed boundary of this image, or hed boundary detection on image, "
                         "or perform hed boundary detection on this image, or detect soft hed boundary image of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        hed = self.detector(image)
        updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
        hed.save(updated_image_path)
        print(f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}")
        return updated_image_path
class HedText2Image:
    def __init__(self, device):
        print(f"Initializing HedText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-hed",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype
        )
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                            'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Soft Hed Boundary Image",
             description="useful when you want to generate a new real image from both the user description "
                         "and a soft hed boundary image. "
                         "like: generate a real image of a object or something from this soft hed boundary image, "
                         "or generate a new real image of a object or something from this hed boundary. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="hed2image")
        image.save(updated_image_path)
        print(f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class Image2Scribble:
    def __init__(self, device):
        print("Initializing Image2Scribble")
        self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
    @prompts(name="Sketch Detection On Image",
             description="useful when you want to generate a scribble of the image. "
                         "like: generate a scribble of this image, or generate a sketch from this image, "
                         "detect the sketch from this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        scribble = self.detector(image, scribble=True)
        updated_image_path = get_new_image_name(inputs, func_name="scribble")
        scribble.save(updated_image_path)
        print(f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble: {updated_image_path}")
        return updated_image_path
class ScribbleText2Image:
    def __init__(self, device):
        print(f"Initializing ScribbleText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-scribble",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype
        )
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                            'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Sketch Image",
             description="useful when you want to generate a new real image from both the user description and "
                         "a scribble image or a sketch image. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
        image.save(updated_image_path)
        print(f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class Image2Pose:
    def __init__(self, device):
        print("Initializing Image2Pose")
        self.detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
    @prompts(name="Pose Detection On Image",
             description="useful when you want to detect the human pose of the image. "
                         "like: generate human poses of this image, or generate a pose image from this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        pose = self.detector(image)
        updated_image_path = get_new_image_name(inputs, func_name="human-pose")
        pose.save(updated_image_path)
        print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}")
        return updated_image_path
class PoseText2Image:
    def __init__(self, device):
        print(f"Initializing PoseText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.num_inference_steps = 20
        self.seed = -1
        self.unconditional_guidance_scale = 9.0
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
                            ' fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Pose Image",
             description="useful when you want to generate a new real image from both the user description "
                         "and a human pose image. "
                         "like: generate a real image of a human from this human pose image, "
                         "or generate a new real image of a human from this pose. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="pose2image")
        image.save(updated_image_path)
        print(f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class SegText2Image:
    def __init__(self, device):
        print(f"Initializing SegText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-seg",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
                            ' fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Segmentations",
             description="useful when you want to generate a new real image from both the user description and segmentations. "
                         "like: generate a real image of a object or something from this segmentation image, "
                         "or generate a new real image of a object or something from these segmentations. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="segment2image")
        image.save(updated_image_path)
        print(f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class Image2Depth:
    def __init__(self, device):
        print("Initializing Image2Depth")
        self.depth_estimator = pipeline('depth-estimation')
    @prompts(name="Predict Depth On Image",
             description="useful when you want to detect depth of the image. like: generate the depth from this image, "
                         "or detect the depth map on this image, or predict the depth for this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        depth = self.depth_estimator(image)['depth']
        depth = np.array(depth)
        depth = depth[:, :, None]
        depth = np.concatenate([depth, depth, depth], axis=2)
        depth = Image.fromarray(depth)
        updated_image_path = get_new_image_name(inputs, func_name="depth")
        depth.save(updated_image_path)
        print(f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth: {updated_image_path}")
        return updated_image_path
class DepthText2Image:
    def __init__(self, device):
        print(f"Initializing DepthText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained(
            "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
                            ' fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Depth",
             description="useful when you want to generate a new real image from both the user description and depth image. "
                         "like: generate a real image of a object or something from this depth image, "
                         "or generate a new real image of a object or something from the depth map. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="depth2image")
        image.save(updated_image_path)
        print(f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class Image2Normal:
    def __init__(self, device):
        print("Initializing Image2Normal")
        self.depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
        self.bg_threshold = 0.4
    @prompts(name="Predict Normal Map On Image",
             description="useful when you want to detect norm map of the image. "
                         "like: generate normal map from this image, or predict normal map of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        image = Image.open(inputs)
        original_size = image.size
        image = self.depth_estimator(image)['predicted_depth'][0]
        image = image.numpy()
        image_depth = image.copy()
        image_depth -= np.min(image_depth)
        image_depth /= np.max(image_depth)
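        # estimate surface normals from the depth map: Sobel gradients give the x/y
        # components, a constant z component is added, then the vectors are normalized
        # and mapped to the 0-255 RGB range (background pixels below the depth threshold are zeroed)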
        x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
        x[image_depth < self.bg_threshold] = 0
        y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
        y[image_depth < self.bg_threshold] = 0
        z = np.ones_like(x) * np.pi * 2.0
        image = np.stack([x, y, z], axis=2)
        image /= np.sum(image ** 2.0, axis=2, keepdims=True) ** 0.5
        image = (image * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
        image = Image.fromarray(image)
        image = image.resize(original_size)
        updated_image_path = get_new_image_name(inputs, func_name="normal-map")
        image.save(updated_image_path)
        print(f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth: {updated_image_path}")
        return updated_image_path
class NormalText2Image:
    def __init__(self, device):
        print(f"Initializing NormalText2Image to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained(
            "fusing/stable-diffusion-v1-5-controlnet-normal", torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker'),
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
                            ' fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Normal Map",
             description="useful when you want to generate a new real image from both the user description and normal map. "
                         "like: generate a real image of a object or something from this normal map, "
                         "or generate a new real image of a object or something from the normal map. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = f'{instruct_text}, {self.a_prompt}'
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="normal2image")
        image.save(updated_image_path)
        print(f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class VisualQuestionAnswering:
    def __init__(self, device):
        print(f"Initializing VisualQuestionAnswering to {device}")
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
        self.model = BlipForQuestionAnswering.from_pretrained(
            "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype).to(self.device)
    @prompts(name="Answer Question About The Image",
             description="useful when you need an answer for a question based on an image. "
                         "like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
                         "The input to this tool should be a comma separated string of two, representing the image_path and the question")
    def inference(self, inputs):
        image_path, question = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        raw_image = Image.open(image_path).convert('RGB')
        inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device, self.torch_dtype)
        out = self.model.generate(**inputs)
        answer = self.processor.decode(out[0], skip_special_tokens=True)
        print(f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
              f"Output Answer: {answer}")
        return answer
class Segmenting:
    def __init__(self, device):
        print(f"Inintializing Segmentation to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.model_checkpoint_path = os.path.join("checkpoints","sam")
        self.download_parameters()
        self.sam = build_sam(checkpoint=self.model_checkpoint_path).to(device)
        self.sam_predictor = SamPredictor(self.sam)
        self.mask_generator = SamAutomaticMaskGenerator(self.sam)
        
        self.saved_points = []
        self.saved_labels = []
    def download_parameters(self):
        url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
        if not os.path.exists(self.model_checkpoint_path):
            wget.download(url,out=self.model_checkpoint_path)
        
    def show_mask(self, mask: np.ndarray,image: np.ndarray,
                random_color: bool = False, transparency=1) -> np.ndarray:
        
        """Visualize a mask on top of an image.
        Args:
            mask (np.ndarray): A 2D array of shape (H, W).
            image (np.ndarray): A 3D array of shape (H, W, 3).
            random_color (bool): Whether to use a random color for the mask.
        Outputs:
            np.ndarray: A 3D array of shape (H, W, 3) with the mask
            visualized on top of the image.
            transparenccy: the transparency of the segmentation mask
        """
        
        if random_color:
            color = np.concatenate([np.random.random(3)], axis=0)
        else:
            color = np.array([30 / 255, 144 / 255, 255 / 255])
        h, w = mask.shape[-2:]
        mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) * 255
        image = cv2.addWeighted(image, 0.7, mask_image.astype('uint8'), transparency, 0)
        return image
    def show_box(self, box, ax, label):
        x0, y0 = box[0], box[1]
        w, h = box[2] - box[0], box[3] - box[1]
        ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) 
        ax.text(x0, y0, label)
    
    def get_mask_with_boxes(self, image_pil, image, boxes_filt):
        size = image_pil.size
        H, W = size[1], size[0]
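        # convert normalized (cx, cy, w, h) boxes into absolute (x0, y0, x1, y1)
        # pixel coordinates expected by the SAM predictor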
        for i in range(boxes_filt.size(0)):
            boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
            boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
            boxes_filt[i][2:] += boxes_filt[i][:2]
        boxes_filt = boxes_filt.cpu()
        transformed_boxes = self.sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(self.device)
        masks, _, _ = self.sam_predictor.predict_torch(
            point_coords = None,
            point_labels = None,
            boxes = transformed_boxes.to(self.device),
            multimask_output = False,
        )
        return masks
    
    def segment_image_with_boxes(self, image_pil, image_path, boxes_filt, pred_phrases):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.sam_predictor.set_image(image)
        masks = self.get_mask_with_boxes(image_pil, image, boxes_filt)
        # draw output image
        for mask in masks:
            image = self.show_mask(mask[0].cpu().numpy(), image, random_color=True, transparency=0.3)
        updated_image_path = get_new_image_name(image_path, func_name="segmentation")
        
        new_image = Image.fromarray(image)
        new_image.save(updated_image_path)
        return updated_image_path
    def set_image(self, img) -> None:
        """Set the image for the predictor."""
        with torch.cuda.amp.autocast():
            self.sam_predictor.set_image(img)
    def show_points(self, coords: np.ndarray, labels: np.ndarray,
                image: np.ndarray) -> np.ndarray:
        """Visualize points on top of an image.
        Args:
            coords (np.ndarray): A 2D array of shape (N, 2).
            labels (np.ndarray): A 1D array of shape (N,).
            image (np.ndarray): A 3D array of shape (H, W, 3).
        Returns:
            np.ndarray: A 3D array of shape (H, W, 3) with the points
            visualized on top of the image.
        """
        pos_points = coords[labels == 1]
        neg_points = coords[labels == 0]
        for p in pos_points:
            image = cv2.circle(
                image, p.astype(int), radius=3, color=(0, 255, 0), thickness=-1)
        for p in neg_points:
            image = cv2.circle(
                image, p.astype(int), radius=3, color=(255, 0, 0), thickness=-1)
        return image
    def segment_image_with_click(self, img, is_positive: bool,
                            evt: gr.SelectData):
                            
        self.sam_predictor.set_image(img)
        self.saved_points.append([evt.index[0], evt.index[1]])
        self.saved_labels.append(1 if is_positive else 0)
        input_point = np.array(self.saved_points)
        input_label = np.array(self.saved_labels)
        
        # Predict the mask
        with torch.cuda.amp.autocast():
            masks, scores, logits = self.sam_predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                multimask_output=False,
            )
        img = self.show_mask(masks[0], img, random_color=False, transparency=0.3)
        img = self.show_points(input_point, input_label, img)
        return img
    def segment_image_with_coordinate(self, img, is_positive: bool,
                            coordinate: tuple):
        '''
            Args:
                img (numpy.ndarray): the given image, shape: H x W x 3.
                is_positive: whether the click is positive; use True to add to the mask, False otherwise.
                coordinate: the position of the click.
                          If the position is (x, y), it means a click at the x-th column and y-th row of the pixel matrix.
                          So x corresponds to W, and y corresponds to H.
            Output:
                img (PIL.Image.Image): the result image
                result_mask (numpy.ndarray): the result mask, shape: H x W
            Other parameters:
                transparency (float): the transparency of the mask,
                                      to control the degree of transparency after the mask is superimposed.
                                      if transparency=1, then the masked part will be completely replaced with other colors.
        '''
        self.sam_predictor.set_image(img)
        self.saved_points.append([coordinate[0], coordinate[1]])
        self.saved_labels.append(1 if is_positive else 0)
        input_point = np.array(self.saved_points)
        input_label = np.array(self.saved_labels)
        # Predict the mask
        with torch.cuda.amp.autocast():
            masks, scores, logits = self.sam_predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                multimask_output=False,
            )
        img = self.show_mask(masks[0], img, random_color=False, transparency=0.3)
        img = self.show_points(input_point, input_label, img)
        img = Image.fromarray(img)
        
        result_mask = masks[0]
        return img, result_mask
    @prompts(name="Segment the Image",
             description="useful when you want to segment all the part of the image, but not segment a certain object."
                         "like: segment all the object in this image, or generate segmentations on this image, "
                         "or segment the image,"
                         "or perform segmentation on this image, "
                         "or segment all the object in this image."
                         "The input to this tool should be a string, representing the image_path")
    def inference_all(self,image_path):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        masks = self.mask_generator.generate(image)
        plt.figure(figsize=(20,20))
        plt.imshow(image)
        if len(masks) == 0:
            return
        sorted_anns = sorted(masks, key=(lambda x: x['area']), reverse=True)
        ax = plt.gca()
        ax.set_autoscale_on(False)
        polygons = []
        color = []
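        # draw each predicted mask as a randomly colored overlay, using the mask
        # itself as the alpha channel of an RGBA image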
        for ann in sorted_anns:
            m = ann['segmentation']
            img = np.ones((m.shape[0], m.shape[1], 3))
            color_mask = np.random.random((1, 3)).tolist()[0]
            for i in range(3):
                img[:,:,i] = color_mask[i]
            ax.imshow(np.dstack((img, m)))
        updated_image_path = get_new_image_name(image_path, func_name="segment-image")
        plt.axis('off')
        plt.savefig(
            updated_image_path, 
            bbox_inches="tight", dpi=300, pad_inches=0.0
        )
        return updated_image_path
    
class Text2Box:
    def __init__(self, device):
        print(f"Initializing ObjectDetection to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.model_checkpoint_path = os.path.join("checkpoints","groundingdino")
        self.model_config_path = os.path.join("checkpoints","grounding_config.py")
        self.download_parameters()
        self.box_threshold = 0.3
        self.text_threshold = 0.25
        self.grounding = (self.load_model()).to(self.device)
    def download_parameters(self):
        url = "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth"
        if not os.path.exists(self.model_checkpoint_path):
            wget.download(url,out=self.model_checkpoint_path)
        config_url = "https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinT_OGC.py"
        if not os.path.exists(self.model_config_path):
            wget.download(config_url,out=self.model_config_path)
    def load_image(self,image_path):
         # load image
        image_pil = Image.open(image_path).convert("RGB")  # load image
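        # preprocessing for the grounding model: resize, convert to tensor,
        # and normalize with ImageNet mean/std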
        transform = T.Compose(
            [
                T.RandomResize([512], max_size=1333),
                T.ToTensor(),
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        image, _ = transform(image_pil, None)  # 3, h, w
        return image_pil, image
    def load_model(self):
        args = SLConfig.fromfile(self.model_config_path)
        args.device = self.device
        model = build_model(args)
        checkpoint = torch.load(self.model_checkpoint_path, map_location="cpu")
        load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
        print(load_res)
        _ = model.eval()
        return model
    def get_grounding_boxes(self, image, caption, with_logits=True):
        caption = caption.lower()
        caption = caption.strip()
        if not caption.endswith("."):
            caption = caption + "."
        image = image.to(self.device)
        with torch.no_grad():
            outputs = self.grounding(image[None], captions=[caption])
        logits = outputs["pred_logits"].cpu().sigmoid()[0]  # (nq, 256)
        boxes = outputs["pred_boxes"].cpu()[0]  # (nq, 4)
        logits.shape[0]
        # filter output
        logits_filt = logits.clone()
        boxes_filt = boxes.clone()
        filt_mask = logits_filt.max(dim=1)[0] > self.box_threshold
        logits_filt = logits_filt[filt_mask]  # num_filt, 256
        boxes_filt = boxes_filt[filt_mask]  # num_filt, 4
        logits_filt.shape[0]
        # get phrase
        tokenlizer = self.grounding.tokenizer
        tokenized = tokenlizer(caption)
        # build pred
        pred_phrases = []
        for logit, box in zip(logits_filt, boxes_filt):
            pred_phrase = get_phrases_from_posmap(logit > self.text_threshold, tokenized, tokenlizer)
            if with_logits:
                pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
            else:
                pred_phrases.append(pred_phrase)
        return boxes_filt, pred_phrases
    
    def plot_boxes_to_image(self, image_pil, tgt):
        H, W = tgt["size"]
        boxes = tgt["boxes"]
        labels = tgt["labels"]
        assert len(boxes) == len(labels), "boxes and labels must have same length"
        draw = ImageDraw.Draw(image_pil)
        mask = Image.new("L", image_pil.size, 0)
        mask_draw = ImageDraw.Draw(mask)
        # draw boxes and masks
        for box, label in zip(boxes, labels):
            # from 0..1 to 0..W, 0..H
            box = box * torch.Tensor([W, H, W, H])
            # from xywh to xyxy
            box[:2] -= box[2:] / 2
            box[2:] += box[:2]
            # random color
            color = tuple(np.random.randint(0, 255, size=3).tolist())
            # draw
            x0, y0, x1, y1 = box
            x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
            draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
            # draw.text((x0, y0), str(label), fill=color)
            font = ImageFont.load_default()
            if hasattr(font, "getbbox"):
                bbox = draw.textbbox((x0, y0), str(label), font)
            else:
                w, h = draw.textsize(str(label), font)
                bbox = (x0, y0, w + x0, y0 + h)
            # bbox = draw.textbbox((x0, y0), str(label))
            draw.rectangle(bbox, fill=color)
            draw.text((x0, y0), str(label), fill="white")
            mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=2)
        return image_pil, mask
    
    @prompts(name="Detect the Give Object",
             description="useful when you only want to detect or find out given objects in the picture"  
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path, the text description of the object to be found")
    def inference(self, inputs):
        image_path, det_prompt = inputs.split(",")
        print(f"image_path={image_path}, text_prompt={det_prompt}")
        image_pil, image = self.load_image(image_path)
        boxes_filt, pred_phrases = self.get_grounding_boxes(image, det_prompt)
        size = image_pil.size
        pred_dict = {
            "boxes": boxes_filt,
            "size": [size[1], size[0]],  # H,W
            "labels": pred_phrases,
        }
        image_with_box = self.plot_boxes_to_image(image_pil, pred_dict)[0]
        updated_image_path = get_new_image_name(image_path, func_name="detect-something")
        updated_image = image_with_box.resize(size)
        updated_image.save(updated_image_path)
        print(
            f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
            f"Output Image: {updated_image_path}")
        return updated_image_path
class Inpainting:
    def __init__(self, device):
        self.device = device
        self.revision = 'fp16' if 'cuda' in self.device else None
        self.torch_dtype = torch.float16 if 'cuda' in self.device else torch.float32
        self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype=self.torch_dtype,safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker')).to(device)
    def __call__(self, prompt, image, mask_image, height=512, width=512, num_inference_steps=50):
        update_image = self.inpaint(prompt=prompt, image=image.resize((width, height)),
                                     mask_image=mask_image.resize((width, height)), height=height, width=width, num_inference_steps=num_inference_steps).images[0]
        return update_image
class InfinityOutPainting:
    template_model = True # Add this line to show this is a template model.
    def __init__(self, ImageCaptioning, Inpainting, VisualQuestionAnswering):
        self.llm = OpenAI(temperature=0)
        self.ImageCaption = ImageCaptioning
        self.inpaint = Inpainting
        self.ImageVQA = VisualQuestionAnswering
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                        'fewer digits, cropped, worst quality, low quality'
    def get_BLIP_vqa(self, image, question):
        inputs = self.ImageVQA.processor(image, question, return_tensors="pt").to(self.ImageVQA.device,
                                                                                  self.ImageVQA.torch_dtype)
        out = self.ImageVQA.model.generate(**inputs)
        answer = self.ImageVQA.processor.decode(out[0], skip_special_tokens=True)
        print(f"\nProcessed VisualQuestionAnswering, Input Question: {question}, Output Answer: {answer}")
        return answer
    def get_BLIP_caption(self, image):
        inputs = self.ImageCaption.processor(image, return_tensors="pt").to(self.ImageCaption.device,
                                                                                self.ImageCaption.torch_dtype)
        out = self.ImageCaption.model.generate(**inputs)
        BLIP_caption = self.ImageCaption.processor.decode(out[0], skip_special_tokens=True)
        return BLIP_caption
    def check_prompt(self, prompt):
        check = f"Here is a paragraph with adjectives. " \
                f"{prompt} " \
                f"Please change all plural forms in the adjectives to singular forms. "
        return self.llm(check)
    def get_imagine_caption(self, image, imagine):
        BLIP_caption = self.get_BLIP_caption(image)
        background_color = self.get_BLIP_vqa(image, 'what is the background color of this image')
        style = self.get_BLIP_vqa(image, 'what is the style of this image')
        imagine_prompt = f"let's pretend you are an excellent painter and now " \
                         f"there is an incomplete painting with {BLIP_caption} in the center, " \
                         f"please imagine the complete painting and describe it" \
                         f"you should consider the background color is {background_color}, the style is {style}" \
                         f"You should make the painting as vivid and realistic as possible" \
                         f"You can not use words like painting or picture" \
                         f"and you should use no more than 50 words to describe it"
        caption = self.llm(imagine_prompt) if imagine else BLIP_caption
        caption = self.check_prompt(caption)
        print(f'BLIP observation: {BLIP_caption}, ChatGPT imagine to {caption}') if imagine else print(
            f'Prompt: {caption}')
        return caption
    def resize_image(self, image, max_size=1000000, multiple=8):
        aspect_ratio = image.size[0] / image.size[1]
        new_width = int(math.sqrt(max_size * aspect_ratio))
        new_height = int(new_width / aspect_ratio)
        new_width, new_height = new_width - (new_width % multiple), new_height - (new_height % multiple)
        return image.resize((new_width, new_height))
    def dowhile(self, original_img, tosize, expand_ratio, imagine, usr_prompt):
        old_img = original_img
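        # repeatedly crop a thin border, paste the current image onto a larger white
        # canvas, and inpaint the masked border region until the target size is reached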
        while (old_img.size != tosize):
            prompt = self.check_prompt(usr_prompt) if usr_prompt else self.get_imagine_caption(old_img, imagine)
            crop_w = 15 if old_img.size[0] != tosize[0] else 0
            crop_h = 15 if old_img.size[1] != tosize[1] else 0
            old_img = ImageOps.crop(old_img, (crop_w, crop_h, crop_w, crop_h))
            temp_canvas_size = (expand_ratio * old_img.width if expand_ratio * old_img.width < tosize[0] else tosize[0],
                                expand_ratio * old_img.height if expand_ratio * old_img.height < tosize[1] else tosize[
                                    1])
            temp_canvas, temp_mask = Image.new("RGB", temp_canvas_size, color="white"), Image.new("L", temp_canvas_size,
                                                                                                  color="white")
            x, y = (temp_canvas.width - old_img.width) // 2, (temp_canvas.height - old_img.height) // 2
            temp_canvas.paste(old_img, (x, y))
            temp_mask.paste(0, (x, y, x + old_img.width, y + old_img.height))
            resized_temp_canvas, resized_temp_mask = self.resize_image(temp_canvas), self.resize_image(temp_mask)
            image = self.inpaint(prompt=prompt, image=resized_temp_canvas, mask_image=resized_temp_mask,
                                              height=resized_temp_canvas.height, width=resized_temp_canvas.width,
                                              num_inference_steps=50).resize(
                (temp_canvas.width, temp_canvas.height), Image.ANTIALIAS)
            image = blend_gt2pt(old_img, image)
            old_img = image
        return old_img
    @prompts(name="Extend An Image",
             description="useful when you need to extend an image into a larger image."
                         "like: extend the image into a resolution of 2048x1024, extend the image into 2048x1024. "
                         "The input to this tool should be a comma separated string of two, representing the image_path and the resolution of widthxheight")
    def inference(self, inputs):
        image_path, resolution = inputs.split(',')
        width, height = resolution.split('x')
        tosize = (int(width), int(height))
        image = Image.open(image_path)
        image = ImageOps.crop(image, (10, 10, 10, 10))
        out_painted_image = self.dowhile(image, tosize, 4, True, False)
        updated_image_path = get_new_image_name(image_path, func_name="outpainting")
        out_painted_image.save(updated_image_path)
        print(f"\nProcessed InfinityOutPainting, Input Image: {image_path}, Input Resolution: {resolution}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
class ObjectSegmenting:
    template_model = True # Add this line to show this is a template model.
    def __init__(self,  Text2Box:Text2Box, Segmenting:Segmenting):
        # self.llm = OpenAI(temperature=0)
        self.grounding = Text2Box
        self.sam = Segmenting
    @prompts(name="Segment the given object",
            description="useful when you only want to segment the certain objects in the picture"
                        "according to the given text"  
                        "like: segment the cat,"
                        "or can you segment an obeject for me"
                        "The input to this tool should be a comma separated string of two, "
                        "representing the image_path, the text description of the object to be found")
    def inference(self, inputs):
        image_path, det_prompt = inputs.split(",")
        print(f"image_path={image_path}, text_prompt={det_prompt}")
        image_pil, image = self.grounding.load_image(image_path)
        boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, det_prompt)
        updated_image_path = self.sam.segment_image_with_boxes(image_pil,image_path,boxes_filt,pred_phrases)
        print(
            f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
            f"Output Image: {updated_image_path}")
        return updated_image_path
    def merge_masks(self, masks):
        '''
            Args:
                mask (numpy.ndarray): shape N x 1 x H x W
            Outputs:
                new_mask (numpy.ndarray): shape H x W       
        '''
        if type(masks) == torch.Tensor:
            x = masks
        elif type(masks) == np.ndarray:
            x = torch.tensor(masks,dtype=int)
        else:   
            raise TypeError("the type of the input masks must be numpy.ndarray or torch.tensor")
        x = x.squeeze(dim=1)
        value, _ = x.max(dim=0)
        new_mask = value.cpu().numpy()
        new_mask = new_mask.astype(np.uint8)
        return new_mask
    
    def get_mask(self, image_path, text_prompt):
        print(f"image_path={image_path}, text_prompt={text_prompt}")
        # image_pil (PIL.Image.Image) -> size: W x H
        # image (numpy.ndarray) -> H x W x 3
        image_pil, image = self.grounding.load_image(image_path)
        boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, text_prompt)
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.sam.sam_predictor.set_image(image)
        
        # masks (torch.tensor) -> N x 1 x H x W 
        masks = self.sam.get_mask_with_boxes(image_pil, image, boxes_filt)
        # merged_mask -> H x W
        merged_mask = self.merge_masks(masks)
        # draw output image
        for mask in masks:
            image = self.sam.show_mask(mask[0].cpu().numpy(), image, random_color=True, transparency=0.3)
        merged_mask_image = Image.fromarray(merged_mask)
        return merged_mask
class ImageEditing:
    template_model = True
    def __init__(self, Text2Box:Text2Box, Segmenting:Segmenting, Inpainting:Inpainting):
        print(f"Initializing ImageEditing")
        self.sam = Segmenting
        self.grounding = Text2Box
        self.inpaint = Inpainting
    def pad_edge(self,mask,padding):
        #mask Tensor [H,W]
        mask = mask.numpy()
        true_indices = np.argwhere(mask)
        mask_array = np.zeros_like(mask, dtype=bool)
        for idx in true_indices:
            padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
            mask_array[padded_slice] = True
        new_mask = (mask_array * 255).astype(np.uint8)
        #new_mask
        return new_mask
    @prompts(name="Remove Something From The Photo",
             description="useful when you want to remove and object or something from the photo "
                         "from its description or location. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the object need to be removed. ")    
    def inference_remove(self, inputs):
        image_path, to_be_removed_txt = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        return self.inference_replace_sam(f"{image_path},{to_be_removed_txt},background")
    @prompts(name="Replace Something From The Photo",
            description="useful when you want to replace an object from the object description or "
                        "location with another object from its description. "
                        "The input to this tool should be a comma separated string of three, "
                        "representing the image_path, the object to be replaced, the object to be replaced with ")
    def inference_replace_sam(self,inputs):
        image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
        
        print(f"image_path={image_path}, to_be_replaced_txt={to_be_replaced_txt}")
        image_pil, image = self.grounding.load_image(image_path)
        boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, to_be_replaced_txt)
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.sam.sam_predictor.set_image(image)
        masks = self.sam.get_mask_with_boxes(image_pil, image, boxes_filt)
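        # union all instance masks, binarize, move to CPU and dilate the edges
        # before using the result as the inpainting mask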
        mask = torch.sum(masks, dim=0).unsqueeze(0)
        mask = torch.where(mask > 0, True, False)
        mask = mask.squeeze(0).squeeze(0).cpu() #tensor
        mask = self.pad_edge(mask,padding=20) #numpy
        mask_image = Image.fromarray(mask)
        updated_image = self.inpaint(prompt=replace_with_txt, image=image_pil,
                                     mask_image=mask_image)
        updated_image_path = get_new_image_name(image_path, func_name="replace-something")
        updated_image = updated_image.resize(image_pil.size)
        updated_image.save(updated_image_path)
        print(
            f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
            f"Output Image: {updated_image_path}")
        return updated_image_path
class BackgroundRemoving:
    '''
        used to remove the background of the given picture
    '''
    template_model = True
    def __init__(self,VisualQuestionAnswering:VisualQuestionAnswering, Text2Box:Text2Box, Segmenting:Segmenting):
        self.vqa = VisualQuestionAnswering
        self.obj_segmenting = ObjectSegmenting(Text2Box,Segmenting)
    @prompts(name="Remove the background",
             description="useful when you want to extract the object or remove the background,"
                         "the input should be a string image_path"
                                )
    def inference(self, image_path):
        '''
            given an image, return a picture that only contains the extracted main object
        '''
        updated_image_path = None
        mask = self.get_mask(image_path)
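        # use the predicted mask as an alpha channel so that everything outside
        # the main object becomes transparent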
        image = Image.open(image_path)
        mask = Image.fromarray(mask)
        image.putalpha(mask)
        updated_image_path = get_new_image_name(image_path, func_name="detect-something")
        image.save(updated_image_path)
        return updated_image_path
    def get_mask(self, image_path):
        '''
            Description:
                given an image path, return the mask of the main object.
            Args:
                image_path (string): the file path of the image
            Outputs:
                mask (numpy.ndarray): H x W
        '''
        vqa_input = f"{image_path}, what is the main object in the image?"
        text_prompt = self.vqa.inference(vqa_input)
        mask = self.obj_segmenting.get_mask(image_path,text_prompt)
        return mask
class ConversationBot:
    def __init__(self, load_dict):
        # load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...}
        print(f"Initializing VisualChatGPT, load_dict={load_dict}")
        if 'ImageCaptioning' not in load_dict:
            raise ValueError("You have to load ImageCaptioning as a basic function for VisualChatGPT")
        self.models = {}
        # Load Basic Foundation Models
        for class_name, device in load_dict.items():
            self.models[class_name] = globals()[class_name](device=device)
        # Load Template Foundation Models
        for class_name, module in globals().items():
            if getattr(module, 'template_model', False):
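                # a template model is only instantiated when all the basic models named
                # in its __init__ signature have already been loaded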
                template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
                loaded_names = set([type(e).__name__ for e in self.models.values()])
                if template_required_names.issubset(loaded_names):
                    self.models[class_name] = globals()[class_name](
                        **{name: self.models[name] for name in template_required_names})
        
        print(f"All the Available Functions: {self.models}")
        self.tools = []
        for instance in self.models.values():
            for e in dir(instance):
                if e.startswith('inference'):
                    func = getattr(instance, e)
                    self.tools.append(Tool(name=func.name, description=func.description, func=func))
        self.llm = OpenAI(temperature=0)
        self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
    def init_agent(self, lang):
        self.memory.clear() #clear previous history
        if lang=='English':
            PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = VISUAL_CHATGPT_PREFIX, VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, VISUAL_CHATGPT_SUFFIX
            place = "Enter text and press enter, or upload an image"
            label_clear = "Clear"
        else:
            PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = VISUAL_CHATGPT_PREFIX_CN, VISUAL_CHATGPT_FORMAT_INSTRUCTIONS_CN, VISUAL_CHATGPT_SUFFIX_CN
            place = "输入文字并回车,或者上传图片"
            label_clear = "清除"
        self.agent = initialize_agent(
            self.tools,
            self.llm,
            agent="conversational-react-description",
            verbose=True,
            memory=self.memory,
            return_intermediate_steps=True,
            agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS,
                          'suffix': SUFFIX}, )
        return gr.update(visible = True), gr.update(visible = False), gr.update(placeholder=place), gr.update(value=label_clear)
    def run_text(self, text, state):
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text.strip()})
        res['output'] = res['output'].replace("\\", "/")
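        # rewrite image paths in the agent output as markdown image links so that
        # gradio renders them inline in the chatbot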
        response = re.sub(r'(image/[-\w]*.png)', lambda m: f'![](file={m.group(0)})*{m.group(0)}*', res['output'])
        state = state + [(text, response)]
        print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n"
              f"Current Memory: {self.agent.memory.buffer}")
        return state, state
    def run_image(self, image, state, txt, lang):
        image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
        print("======>Auto Resize Image...")
        img = Image.open(image.name)
        width, height = img.size
        ratio = min(512 / width, 512 / height)
        width_new, height_new = (round(width * ratio), round(height * ratio))
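        # round the resized dimensions to multiples of 64 so the downstream
        # image models receive well-aligned sizes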
        width_new = int(np.round(width_new / 64.0)) * 64
        height_new = int(np.round(height_new / 64.0)) * 64
        img = img.resize((width_new, height_new))
        img = img.convert('RGB')
        img.save(image_filename, "PNG")
        print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
        description = self.models['ImageCaptioning'].inference(image_filename)
        if lang == 'Chinese':
            Human_prompt = f'\nHuman: 提供一张名为 {image_filename}的图片。它的描述是: {description}。 这些信息帮助你理解这个图像,但是你应该使用工具来完成下面的任务,而不是直接从我的描述中想象。 如果你明白了, 说 \"收到\". \n'
            AI_prompt = "收到。  "
        else:
            Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
            AI_prompt = "Received.  "
        self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
        state = state + [(f"*{image_filename}*", AI_prompt)]
        print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
              f"Current Memory: {self.agent.memory.buffer}")
        return state, state, f'{txt} {image_filename} '
if __name__ == '__main__':
    if not os.path.exists("checkpoints"):
        os.mkdir("checkpoints")
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', type=str, default="ImageCaptioning_cuda:0,Text2Image_cuda:0")
    args = parser.parse_args()
    load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')}
    bot = ConversationBot(load_dict=load_dict)
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        lang = gr.Radio(choices = ['Chinese','English'], value=None, label='Language')
        chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
        state = gr.State([])
        with gr.Row(visible=False) as input_raws:
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(
                    container=False)
            with gr.Column(scale=0.15, min_width=0):
                clear = gr.Button("Clear")
            with gr.Column(scale=0.15, min_width=0):
                btn = gr.UploadButton(label="🖼️",file_types=["image"])
        lang.change(bot.init_agent, [lang], [input_raws, lang, txt, clear])
        txt.submit(bot.run_text, [txt, state], [chatbot, state])
        txt.submit(lambda: "", None, txt)
        btn.upload(bot.run_image, [btn, state, txt, lang], [chatbot, state, txt])
        clear.click(bot.memory.clear)
        clear.click(lambda: [], None, chatbot)
        clear.click(lambda: [], None, state)
    demo.launch(server_name="0.0.0.0", server_port=7861)
 | 
	[
  "langchain.llms.openai.OpenAI",
  "langchain.agents.tools.Tool",
  "langchain.chains.conversation.memory.ConversationBufferMemory",
  "langchain.agents.initialize.initialize_agent"
] | 
	[((6155, 6190), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (6166, 6190), False, 'import os\n'), ((6224, 6241), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6235, 6241), False, 'import random\n'), ((6246, 6266), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6260, 6266), True, 'import numpy as np\n'), ((6271, 6294), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (6288, 6294), False, 'import torch\n'), ((6299, 6331), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (6325, 6331), False, 'import torch\n'), ((6652, 6671), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (6660, 6671), True, 'import numpy as np\n'), ((6691, 6710), 'numpy.array', 'np.array', (['old_image'], {}), '(old_image)\n', (6699, 6710), True, 'import numpy as np\n'), ((6817, 6872), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['old_size[1]', '(old_size[1] * sigma)'], {}), '(old_size[1], old_size[1] * sigma)\n', (6838, 6872), False, 'import cv2\n'), ((6888, 6943), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['old_size[0]', '(old_size[0] * sigma)'], {}), '(old_size[0], old_size[0] * sigma)\n', (6909, 6943), False, 'import cv2\n'), ((7390, 7415), 'numpy.expand_dims', 'np.expand_dims', (['kernel', '(2)'], {}), '(kernel, 2)\n', (7404, 7415), True, 'import numpy as np\n'), ((7429, 7452), 'numpy.repeat', 'np.repeat', (['kernel', '(3)', '(2)'], {}), '(kernel, 3, 2)\n', (7438, 7452), True, 'import numpy as np\n'), ((7467, 7491), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'steps'], {}), '(0, 1, steps)\n', (7478, 7491), True, 'import numpy as np\n'), ((7502, 7527), 'numpy.expand_dims', 'np.expand_dims', (['weight', '(1)'], {}), '(weight, 1)\n', (7516, 7527), True, 'import numpy as np\n'), ((7538, 7580), 'numpy.repeat', 'np.repeat', (['top', '(old_size[0] - 2 * steps)', '(1)'], {}), '(top, old_size[0] - 2 * steps, 1)\n', (7547, 7580), True, 'import numpy as np\n'), ((7591, 7613), 'numpy.expand_dims', 'np.expand_dims', (['top', '(2)'], {}), '(top, 2)\n', (7605, 7613), True, 'import numpy as np\n'), ((7624, 7644), 'numpy.repeat', 'np.repeat', (['top', '(3)', '(2)'], {}), '(top, 3, 2)\n', (7633, 7644), True, 'import numpy as np\n'), ((7659, 7683), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'steps'], {}), '(1, 0, steps)\n', (7670, 7683), True, 'import numpy as np\n'), ((7695, 7720), 'numpy.expand_dims', 'np.expand_dims', (['weight', '(1)'], {}), '(weight, 1)\n', (7709, 7720), True, 'import numpy as np\n'), ((7732, 7775), 'numpy.repeat', 'np.repeat', (['down', '(old_size[0] - 2 * steps)', '(1)'], {}), '(down, old_size[0] - 2 * steps, 1)\n', (7741, 7775), True, 'import numpy as np\n'), ((7787, 7810), 'numpy.expand_dims', 'np.expand_dims', (['down', '(2)'], {}), '(down, 2)\n', (7801, 7810), True, 'import numpy as np\n'), ((7822, 7843), 'numpy.repeat', 'np.repeat', (['down', '(3)', '(2)'], {}), '(down, 3, 2)\n', (7831, 7843), True, 'import numpy as np\n'), ((7858, 7882), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'steps'], {}), '(0, 1, steps)\n', (7869, 7882), True, 'import numpy as np\n'), ((7894, 7919), 'numpy.expand_dims', 'np.expand_dims', (['weight', '(0)'], {}), '(weight, 0)\n', (7908, 7919), True, 'import numpy as np\n'), ((7931, 7974), 'numpy.repeat', 'np.repeat', (['left', '(old_size[1] - 2 * steps)', '(0)'], {}), '(left, old_size[1] - 2 * steps, 0)\n', (7940, 7974), True, 'import numpy as np\n'), ((7986, 8009), 'numpy.expand_dims', 
'np.expand_dims', (['left', '(2)'], {}), '(left, 2)\n', (8000, 8009), True, 'import numpy as np\n'), ((8021, 8042), 'numpy.repeat', 'np.repeat', (['left', '(3)', '(2)'], {}), '(left, 3, 2)\n', (8030, 8042), True, 'import numpy as np\n'), ((8057, 8081), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'steps'], {}), '(1, 0, steps)\n', (8068, 8081), True, 'import numpy as np\n'), ((8094, 8119), 'numpy.expand_dims', 'np.expand_dims', (['weight', '(0)'], {}), '(weight, 0)\n', (8108, 8119), True, 'import numpy as np\n'), ((8132, 8176), 'numpy.repeat', 'np.repeat', (['right', '(old_size[1] - 2 * steps)', '(0)'], {}), '(right, old_size[1] - 2 * steps, 0)\n', (8141, 8176), True, 'import numpy as np\n'), ((8189, 8213), 'numpy.expand_dims', 'np.expand_dims', (['right', '(2)'], {}), '(right, 2)\n', (8203, 8213), True, 'import numpy as np\n'), ((8226, 8248), 'numpy.repeat', 'np.repeat', (['right', '(3)', '(2)'], {}), '(right, 3, 2)\n', (8235, 8248), True, 'import numpy as np\n'), ((8746, 8771), 'PIL.Image.fromarray', 'Image.fromarray', (['easy_img'], {}), '(easy_img)\n', (8761, 8771), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((9476, 9503), 'os.path.split', 'os.path.split', (['org_img_name'], {}), '(org_img_name)\n', (9489, 9503), False, 'import os\n'), ((9955, 9988), 'os.path.join', 'os.path.join', (['head', 'new_file_name'], {}), '(head, new_file_name)\n', (9967, 9988), False, 'import os\n'), ((77949, 77974), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (77972, 77974), False, 'import argparse\n'), ((6979, 7001), 'numpy.transpose', 'np.transpose', (['kernel_w'], {}), '(kernel_w)\n', (6991, 7001), True, 'import numpy as np\n'), ((10651, 10722), 'diffusers.EulerAncestralDiscreteScheduler.from_config', 'EulerAncestralDiscreteScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (10694, 10722), False, 'from diffusers import EulerAncestralDiscreteScheduler\n'), ((11353, 11375), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (11363, 11375), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((12029, 12136), 'diffusers.StableDiffusionPipeline.from_pretrained', 'StableDiffusionPipeline.from_pretrained', (['"""runwayml/stable-diffusion-v1-5"""'], {'torch_dtype': 'self.torch_dtype'}), "('runwayml/stable-diffusion-v1-5',\n    torch_dtype=self.torch_dtype)\n", (12068, 12136), False, 'from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline\n'), ((13529, 13599), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (13558, 13599), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((15056, 15074), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (15066, 15074), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((15091, 15106), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15099, 15106), True, 'import numpy as np\n'), ((15123, 15180), 'cv2.Canny', 'cv2.Canny', (['image', 'self.low_threshold', 'self.high_threshold'], {}), '(image, self.low_threshold, self.high_threshold)\n', (15132, 15180), False, 'import cv2\n'), ((15231, 15276), 'numpy.concatenate', 'np.concatenate', (['[canny, canny, canny]'], {'axis': '(2)'}), '([canny, canny, canny], axis=2)\n', (15245, 15276), True, 'import numpy as 
np\n'), ((15293, 15315), 'PIL.Image.fromarray', 'Image.fromarray', (['canny'], {}), '(canny)\n', (15308, 15315), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((15785, 15900), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-canny"""'], {'torch_dtype': 'self.torch_dtype'}), "('fusing/stable-diffusion-v1-5-controlnet-canny'\n    , torch_dtype=self.torch_dtype)\n", (15816, 15900), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((16275, 16338), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (16310, 16338), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((17345, 17367), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (17355, 17367), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((17388, 17412), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (17402, 17412), False, 'import random\n'), ((18092, 18145), 'controlnet_aux.MLSDdetector.from_pretrained', 'MLSDdetector.from_pretrained', (['"""lllyasviel/ControlNet"""'], {}), "('lllyasviel/ControlNet')\n", (18120, 18145), False, 'from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector\n'), ((18670, 18688), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (18680, 18688), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((19193, 19306), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-mlsd"""'], {'torch_dtype': 'self.torch_dtype'}), "('fusing/stable-diffusion-v1-5-controlnet-mlsd',\n    torch_dtype=self.torch_dtype)\n", (19224, 19306), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((19691, 19754), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (19726, 19754), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((20808, 20830), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (20818, 20830), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((20851, 20875), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (20865, 20875), False, 'import random\n'), ((21550, 21602), 'controlnet_aux.HEDdetector.from_pretrained', 'HEDdetector.from_pretrained', (['"""lllyasviel/ControlNet"""'], {}), "('lllyasviel/ControlNet')\n", (21577, 21602), False, 'from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector\n'), ((22131, 22149), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (22141, 22149), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((22653, 22765), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-hed"""'], {'torch_dtype': 'self.torch_dtype'}), "('fusing/stable-diffusion-v1-5-controlnet-hed',\n    torch_dtype=self.torch_dtype)\n", (22684, 22765), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((23150, 23213), 
'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (23185, 23213), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((24284, 24306), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (24294, 24306), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((24327, 24351), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (24341, 24351), False, 'import random\n'), ((25034, 25086), 'controlnet_aux.HEDdetector.from_pretrained', 'HEDdetector.from_pretrained', (['"""lllyasviel/ControlNet"""'], {}), "('lllyasviel/ControlNet')\n", (25061, 25086), False, 'from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector\n'), ((25539, 25557), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (25549, 25557), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((26102, 26225), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-scribble"""'], {'torch_dtype': 'self.torch_dtype'}), "(\n    'fusing/stable-diffusion-v1-5-controlnet-scribble', torch_dtype=self.\n    torch_dtype)\n", (26133, 26225), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((26604, 26667), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (26639, 26667), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((27514, 27536), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (27524, 27536), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((27557, 27581), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (27571, 27581), False, 'import random\n'), ((28271, 28328), 'controlnet_aux.OpenposeDetector.from_pretrained', 'OpenposeDetector.from_pretrained', (['"""lllyasviel/ControlNet"""'], {}), "('lllyasviel/ControlNet')\n", (28303, 28328), False, 'from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector\n'), ((28723, 28741), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (28733, 28741), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((29249, 29372), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-openpose"""'], {'torch_dtype': 'self.torch_dtype'}), "(\n    'fusing/stable-diffusion-v1-5-controlnet-openpose', torch_dtype=self.\n    torch_dtype)\n", (29280, 29372), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((29742, 29805), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (29777, 29805), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((30899, 30921), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (30909, 30921), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((30942, 30966), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (30956, 30966), False, 'import random\n'), ((31744, 31856), 
'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-seg"""'], {'torch_dtype': 'self.torch_dtype'}), "('fusing/stable-diffusion-v1-5-controlnet-seg',\n    torch_dtype=self.torch_dtype)\n", (31775, 31856), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((32232, 32295), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (32267, 32295), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((33313, 33335), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (33323, 33335), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((33356, 33380), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (33370, 33380), False, 'import random\n'), ((34068, 34096), 'transformers.pipeline', 'pipeline', (['"""depth-estimation"""'], {}), "('depth-estimation')\n", (34076, 34096), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((34515, 34533), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (34525, 34533), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((34603, 34618), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (34611, 34618), True, 'import numpy as np\n'), ((34669, 34714), 'numpy.concatenate', 'np.concatenate', (['[depth, depth, depth]'], {'axis': '(2)'}), '([depth, depth, depth], axis=2)\n', (34683, 34714), True, 'import numpy as np\n'), ((34731, 34753), 'PIL.Image.fromarray', 'Image.fromarray', (['depth'], {}), '(depth)\n', (34746, 34753), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((35225, 35340), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-depth"""'], {'torch_dtype': 'self.torch_dtype'}), "('fusing/stable-diffusion-v1-5-controlnet-depth'\n    , torch_dtype=self.torch_dtype)\n", (35256, 35340), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((35670, 35733), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (35705, 35733), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((36728, 36750), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (36738, 36750), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((36771, 36795), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (36785, 36795), False, 'import random\n'), ((37487, 37547), 'transformers.pipeline', 'pipeline', (['"""depth-estimation"""'], {'model': '"""Intel/dpt-hybrid-midas"""'}), "('depth-estimation', model='Intel/dpt-hybrid-midas')\n", (37495, 37547), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((37967, 37985), 'PIL.Image.open', 'Image.open', (['inputs'], {}), '(inputs)\n', (37977, 37985), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((38175, 38194), 'numpy.min', 'np.min', (['image_depth'], {}), '(image_depth)\n', (38181, 38194), True, 'import numpy as np\n'), ((38218, 38237), 
'numpy.max', 'np.max', (['image_depth'], {}), '(image_depth)\n', (38224, 38237), True, 'import numpy as np\n'), ((38250, 38293), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_32F', '(1)', '(0)'], {'ksize': '(3)'}), '(image, cv2.CV_32F, 1, 0, ksize=3)\n', (38259, 38293), False, 'import cv2\n'), ((38352, 38395), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_32F', '(0)', '(1)'], {'ksize': '(3)'}), '(image, cv2.CV_32F, 0, 1, ksize=3)\n', (38361, 38395), False, 'import cv2\n'), ((38500, 38527), 'numpy.stack', 'np.stack', (['[x, y, z]'], {'axis': '(2)'}), '([x, y, z], axis=2)\n', (38508, 38527), True, 'import numpy as np\n'), ((38682, 38704), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (38697, 38704), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((39228, 39349), 'diffusers.ControlNetModel.from_pretrained', 'ControlNetModel.from_pretrained', (['"""fusing/stable-diffusion-v1-5-controlnet-normal"""'], {'torch_dtype': 'self.torch_dtype'}), "(\n    'fusing/stable-diffusion-v1-5-controlnet-normal', torch_dtype=self.\n    torch_dtype)\n", (39259, 39349), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((39674, 39737), 'diffusers.UniPCMultistepScheduler.from_config', 'UniPCMultistepScheduler.from_config', (['self.pipe.scheduler.config'], {}), '(self.pipe.scheduler.config)\n', (39709, 39737), False, 'from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n'), ((40736, 40758), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (40746, 40758), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((40779, 40803), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (40793, 40803), False, 'import random\n'), ((41636, 41693), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (41665, 41693), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((43095, 43129), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '"""sam"""'], {}), "('checkpoints', 'sam')\n", (43107, 43129), False, 'import os\n'), ((43273, 43295), 'segment_anything.SamPredictor', 'SamPredictor', (['self.sam'], {}), '(self.sam)\n', (43285, 43295), False, 'from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator\n'), ((43326, 43361), 'segment_anything.SamAutomaticMaskGenerator', 'SamAutomaticMaskGenerator', (['self.sam'], {}), '(self.sam)\n', (43351, 43361), False, 'from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator\n'), ((45819, 45841), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (45829, 45841), False, 'import cv2\n'), ((45858, 45896), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (45870, 45896), False, 'import cv2\n'), ((46288, 46310), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (46303, 46310), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((47782, 47809), 'numpy.array', 'np.array', (['self.saved_points'], {}), '(self.saved_points)\n', (47790, 47809), True, 'import numpy as np\n'), ((47832, 47859), 'numpy.array', 'np.array', (['self.saved_labels'], {}), '(self.saved_labels)\n', (47840, 47859), True, 'import numpy as np\n'), ((49547, 49574), 'numpy.array', 'np.array', 
(['self.saved_points'], {}), '(self.saved_points)\n', (49555, 49574), True, 'import numpy as np\n'), ((49597, 49624), 'numpy.array', 'np.array', (['self.saved_labels'], {}), '(self.saved_labels)\n', (49605, 49624), True, 'import numpy as np\n'), ((50057, 50077), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (50072, 50077), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((50763, 50785), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (50773, 50785), False, 'import cv2\n'), ((50802, 50840), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (50814, 50840), False, 'import cv2\n'), ((50901, 50929), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (50911, 50929), True, 'import matplotlib.pyplot as plt\n'), ((50937, 50954), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (50947, 50954), True, 'import matplotlib.pyplot as plt\n'), ((51092, 51101), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (51099, 51101), True, 'import matplotlib.pyplot as plt\n'), ((51576, 51591), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (51584, 51591), True, 'import matplotlib.pyplot as plt\n'), ((51600, 51677), 'matplotlib.pyplot.savefig', 'plt.savefig', (['updated_image_path'], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'pad_inches': '(0.0)'}), "(updated_image_path, bbox_inches='tight', dpi=300, pad_inches=0.0)\n", (51611, 51677), True, 'import matplotlib.pyplot as plt\n'), ((52005, 52049), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '"""groundingdino"""'], {}), "('checkpoints', 'groundingdino')\n", (52017, 52049), False, 'import os\n'), ((52082, 52132), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '"""grounding_config.py"""'], {}), "('checkpoints', 'grounding_config.py')\n", (52094, 52132), False, 'import os\n'), ((53324, 53365), 'groundingdino.util.slconfig.SLConfig.fromfile', 'SLConfig.fromfile', (['self.model_config_path'], {}), '(self.model_config_path)\n', (53341, 53365), False, 'from groundingdino.util.slconfig import SLConfig\n'), ((53416, 53433), 'groundingdino.models.build_model', 'build_model', (['args'], {}), '(args)\n', (53427, 53433), False, 'from groundingdino.models import build_model\n'), ((53455, 53513), 'torch.load', 'torch.load', (['self.model_checkpoint_path'], {'map_location': '"""cpu"""'}), "(self.model_checkpoint_path, map_location='cpu')\n", (53465, 53513), False, 'import torch\n'), ((55272, 55297), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image_pil'], {}), '(image_pil)\n', (55286, 55297), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((55313, 55346), 'PIL.Image.new', 'Image.new', (['"""L"""', 'image_pil.size', '(0)'], {}), "('L', image_pil.size, 0)\n", (55322, 55346), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((55367, 55387), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (55381, 55387), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((58901, 58922), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (58907, 58922), False, 'from langchain.llms.openai import OpenAI\n'), ((64447, 64469), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (64457, 64469), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((64486, 64524), 'PIL.ImageOps.crop', 'ImageOps.crop', (['image', '(10, 10, 10, 10)'], 
{}), '(image, (10, 10, 10, 10))\n', (64499, 64524), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((67373, 67395), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (67383, 67395), False, 'import cv2\n'), ((67412, 67450), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (67424, 67450), False, 'import cv2\n'), ((67902, 67930), 'PIL.Image.fromarray', 'Image.fromarray', (['merged_mask'], {}), '(merged_mask)\n', (67917, 67930), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((68354, 68371), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (68365, 68371), True, 'import numpy as np\n'), ((68393, 68424), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (68406, 68424), True, 'import numpy as np\n'), ((70152, 70174), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (70162, 70174), False, 'import cv2\n'), ((70191, 70229), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (70203, 70229), False, 'import cv2\n'), ((70420, 70454), 'torch.where', 'torch.where', (['(mask > 0)', '(True)', '(False)'], {}), '(mask > 0, True, False)\n', (70431, 70454), False, 'import torch\n'), ((70586, 70607), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (70601, 70607), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((72003, 72025), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (72013, 72025), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((72041, 72062), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (72056, 72062), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((74317, 74338), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (74323, 74338), False, 'from langchain.llms.openai import OpenAI\n'), ((74361, 74433), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (74385, 74433), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((75024, 75278), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX\n    }"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX,\n    'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX})\n", (75040, 75278), False, 'from langchain.agents.initialize import initialize_agent\n'), ((76277, 76299), 'PIL.Image.open', 'Image.open', (['image.name'], {}), '(image.name)\n', (76287, 76299), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((77873, 77902), 'os.path.exists', 'os.path.exists', (['"""checkpoints"""'], {}), "('checkpoints')\n", (77887, 77902), False, 'import os\n'), ((77912, 77935), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (77920, 77935), False, 'import os\n'), ((78255, 78311), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot 
.overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (78264, 78311), True, 'import gradio as gr\n'), ((78336, 78406), 'gradio.Radio', 'gr.Radio', ([], {'choices': "['Chinese', 'English']", 'value': 'None', 'label': '"""Language"""'}), "(choices=['Chinese', 'English'], value=None, label='Language')\n", (78344, 78406), True, 'import gradio as gr\n'), ((78426, 78479), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""Visual ChatGPT"""'}), "(elem_id='chatbot', label='Visual ChatGPT')\n", (78436, 78479), True, 'import gradio as gr\n'), ((78496, 78508), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (78504, 78508), True, 'import gradio as gr\n'), ((9623, 9635), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9633, 9635), False, 'import uuid\n'), ((38545, 38588), 'numpy.sum', 'np.sum', (['(image ** 2.0)'], {'axis': '(2)', 'keepdims': '(True)'}), '(image ** 2.0, axis=2, keepdims=True)\n', (38551, 38588), True, 'import numpy as np\n'), ((43569, 43611), 'os.path.exists', 'os.path.exists', (['self.model_checkpoint_path'], {}), '(self.model_checkpoint_path)\n', (43583, 43611), False, 'import os\n'), ((43625, 43675), 'wget.download', 'wget.download', (['url'], {'out': 'self.model_checkpoint_path'}), '(url, out=self.model_checkpoint_path)\n', (43638, 43675), False, 'import wget\n'), ((44434, 44476), 'numpy.array', 'np.array', (['[30 / 255, 144 / 255, 255 / 255]'], {}), '([30 / 255, 144 / 255, 255 / 255])\n', (44442, 44476), True, 'import numpy as np\n'), ((44838, 44916), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(x0, y0)', 'w', 'h'], {'edgecolor': '"""green"""', 'facecolor': '(0, 0, 0, 0)', 'lw': '(2)'}), "((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)\n", (44851, 44916), True, 'import matplotlib.pyplot as plt\n'), ((46488, 46513), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (46511, 46513), False, 'import torch\n'), ((47909, 47934), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (47932, 47934), False, 'import torch\n'), ((49666, 49691), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (49689, 49691), False, 'import torch\n'), ((51264, 51300), 'numpy.ones', 'np.ones', (['(m.shape[0], m.shape[1], 3)'], {}), '((m.shape[0], m.shape[1], 3))\n', (51271, 51300), True, 'import numpy as np\n'), ((52469, 52511), 'os.path.exists', 'os.path.exists', (['self.model_checkpoint_path'], {}), '(self.model_checkpoint_path)\n', (52483, 52511), False, 'import os\n'), ((52525, 52575), 'wget.download', 'wget.download', (['url'], {'out': 'self.model_checkpoint_path'}), '(url, out=self.model_checkpoint_path)\n', (52538, 52575), False, 'import wget\n'), ((52728, 52766), 'os.path.exists', 'os.path.exists', (['self.model_config_path'], {}), '(self.model_config_path)\n', (52742, 52766), False, 'import os\n'), ((52780, 52833), 'wget.download', 'wget.download', (['config_url'], {'out': 'self.model_config_path'}), '(config_url, out=self.model_config_path)\n', (52793, 52833), False, 'import wget\n'), ((53555, 53592), 'groundingdino.util.utils.clean_state_dict', 'clean_state_dict', (["checkpoint['model']"], {}), "(checkpoint['model'])\n", (53571, 53592), False, 'from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap\n'), ((53941, 53956), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (53954, 53956), False, 'import torch\n'), ((54732, 54807), 'groundingdino.util.utils.get_phrases_from_posmap', 'get_phrases_from_posmap', (['(logit > 
self.text_threshold)', 'tokenized', 'tokenlizer'], {}), '(logit > self.text_threshold, tokenized, tokenlizer)\n', (54755, 54807), False, 'from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap\n'), ((56014, 56038), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (56036, 56038), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((61816, 61850), 'math.sqrt', 'math.sqrt', (['(max_size * aspect_ratio)'], {}), '(max_size * aspect_ratio)\n', (61825, 61850), False, 'import math\n'), ((62474, 62530), 'PIL.ImageOps.crop', 'ImageOps.crop', (['old_img', '(crop_w, crop_h, crop_w, crop_h)'], {}), '(old_img, (crop_w, crop_h, crop_w, crop_h))\n', (62487, 62530), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((75394, 75417), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (75403, 75417), True, 'import gradio as gr\n'), ((75421, 75445), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (75430, 75445), True, 'import gradio as gr\n'), ((75449, 75477), 'gradio.update', 'gr.update', ([], {'placeholder': 'place'}), '(placeholder=place)\n', (75458, 75477), True, 'import gradio as gr\n'), ((75479, 75507), 'gradio.update', 'gr.update', ([], {'value': 'label_clear'}), '(value=label_clear)\n', (75488, 75507), True, 'import gradio as gr\n'), ((78522, 78543), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (78528, 78543), True, 'import gradio as gr\n'), ((13621, 13741), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {'torch_dtype': 'self.torch_dtype'}), "(\n    'Salesforce/blip-image-captioning-base', torch_dtype=self.torch_dtype)\n", (13665, 13741), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((16114, 16206), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (16158, 16206), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((19521, 19613), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (19565, 19613), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((22980, 23072), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (23024, 23072), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((26434, 26526), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (26478, 26526), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((29581, 29673), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 
'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (29625, 29673), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((32071, 32163), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (32115, 32163), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((35509, 35601), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (35553, 35601), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((38454, 38469), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (38466, 38469), True, 'import numpy as np\n'), ((39513, 39605), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (39557, 39605), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((41715, 41817), 'transformers.BlipForQuestionAnswering.from_pretrained', 'BlipForQuestionAnswering.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {'torch_dtype': 'self.torch_dtype'}), "('Salesforce/blip-vqa-base',\n    torch_dtype=self.torch_dtype)\n", (41755, 41817), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((42403, 42425), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (42413, 42425), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((43184, 43232), 'segment_anything.build_sam', 'build_sam', ([], {'checkpoint': 'self.model_checkpoint_path'}), '(checkpoint=self.model_checkpoint_path)\n', (43193, 43232), False, 'from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator\n'), ((45169, 45195), 'torch.Tensor', 'torch.Tensor', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (45181, 45195), False, 'import torch\n'), ((51459, 51478), 'numpy.dstack', 'np.dstack', (['(img, m)'], {}), '((img, m))\n', (51468, 51478), True, 'import numpy as np\n'), ((52912, 52934), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (52922, 52934), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((53026, 53062), 'groundingdino.datasets.transforms.RandomResize', 'T.RandomResize', (['[512]'], {'max_size': '(1333)'}), '([512], max_size=1333)\n', (53040, 53062), True, 'import groundingdino.datasets.transforms as T\n'), ((53080, 53092), 'groundingdino.datasets.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (53090, 53092), True, 'import groundingdino.datasets.transforms as T\n'), ((53110, 53167), 'groundingdino.datasets.transforms.Normalize', 'T.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (53121, 53167), True, 'import groundingdino.datasets.transforms as T\n'), ((55528, 55554), 'torch.Tensor', 'torch.Tensor', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (55540, 55554), False, 'import 
torch\n'), ((62849, 62898), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'temp_canvas_size'], {'color': '"""white"""'}), "('RGB', temp_canvas_size, color='white')\n", (62858, 62898), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((62900, 62947), 'PIL.Image.new', 'Image.new', (['"""L"""', 'temp_canvas_size'], {'color': '"""white"""'}), "('L', temp_canvas_size, color='white')\n", (62909, 62947), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((66677, 66707), 'torch.tensor', 'torch.tensor', (['masks'], {'dtype': 'int'}), '(masks, dtype=int)\n', (66689, 66707), False, 'import torch\n'), ((70368, 70391), 'torch.sum', 'torch.sum', (['masks'], {'dim': '(0)'}), '(masks, dim=0)\n', (70377, 70391), False, 'import torch\n'), ((76482, 76508), 'numpy.round', 'np.round', (['(width_new / 64.0)'], {}), '(width_new / 64.0)\n', (76490, 76508), True, 'import numpy as np\n'), ((76540, 76567), 'numpy.round', 'np.round', (['(height_new / 64.0)'], {}), '(height_new / 64.0)\n', (76548, 76567), True, 'import numpy as np\n'), ((78576, 78596), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (78585, 78596), True, 'import gradio as gr\n'), ((78772, 78806), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (78781, 78806), True, 'import gradio as gr\n'), ((78832, 78850), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (78841, 78850), True, 'import gradio as gr\n'), ((78868, 78902), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (78877, 78902), True, 'import gradio as gr\n'), ((78926, 78975), 'gradio.UploadButton', 'gr.UploadButton', ([], {'label': '"""🖼️"""', 'file_types': "['image']"}), "(label='🖼️', file_types=['image'])\n", (78941, 78975), True, 'import gradio as gr\n'), ((14093, 14115), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (14103, 14115), False, 'from PIL import Image, ImageDraw, ImageOps, ImageFont\n'), ((44370, 44389), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (44386, 44389), True, 'import numpy as np\n'), ((10416, 10508), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (10460, 10508), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((51326, 51350), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (51342, 51350), True, 'import numpy as np\n'), ((55706, 55739), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(3)'}), '(0, 255, size=3)\n', (55723, 55739), True, 'import numpy as np\n'), ((58223, 58315), 'diffusers.pipelines.stable_diffusion.StableDiffusionSafetyChecker.from_pretrained', 'StableDiffusionSafetyChecker.from_pretrained', (['"""CompVis/stable-diffusion-safety-checker"""'], {}), "(\n    'CompVis/stable-diffusion-safety-checker')\n", (58267, 58315), False, 'from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\n'), ((74235, 74296), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'func.name', 'description': 'func.description', 'func': 'func'}), '(name=func.name, description=func.description, func=func)\n', (74239, 74296), False, 'from langchain.agents.tools import Tool\n'), ((78620, 78715), 'gradio.Textbox', 'gr.Textbox', ([], 
{'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n    'Enter text and press enter, or upload an image')\n", (78630, 78715), True, 'import gradio as gr\n'), ((12963, 12975), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12973, 12975), False, 'import uuid\n'), ((76193, 76205), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (76203, 76205), False, 'import uuid\n'), ((73549, 73583), 'inspect.signature', 'inspect.signature', (['module.__init__'], {}), '(module.__init__)\n', (73566, 73583), False, 'import inspect\n')] | 
| 
	from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
    AuthenticationError,
    InvalidRequestError,
    RateLimitError,
    ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
    try:
        return parser.parse(completion)
    except OutputParserException as e:
        raise OpenAIError(
            e, "There was an issue parsing the response from the AI model."
        )
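# Await an OpenAI-backed call and translate known OpenAI exceptions into user-facing OpenAIError messages.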
async def openai_error_handler(
    func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
    try:
        return await func(*args, **kwargs)
    except ServiceUnavailableError as e:
        raise OpenAIError(
            e,
            "OpenAI is experiencing issues. Visit "
            "https://status.openai.com/ for more info.",
            should_log=not settings.custom_api_key,
        )
    except InvalidRequestError as e:
        if e.user_message.startswith("The model:"):
            raise OpenAIError(
                e,
                f"Your API key does not have access to your current model. Please use a different model.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except AuthenticationError as e:
        raise OpenAIError(
            e,
            "Authentication error: Ensure a valid API key is being used.",
            should_log=not settings.custom_api_key,
        )
    except RateLimitError as e:
        if e.user_message.startswith("You exceeded your current quota"):
            raise OpenAIError(
                e,
                f"Your API key exceeded your current quota, please check your plan and billing details.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except Exception as e:
        raise OpenAIError(
            e, "There was an unexpected issue getting a response from the AI model."
        )
async def call_model_with_handling(
    model: BaseChatModel,
    prompt: BasePromptTemplate,
    args: Dict[str, str],
    settings: ModelSettings,
    **kwargs: Any,
) -> str:
    chain = LLMChain(llm=model, prompt=prompt)
    return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
 | 
	[
  "langchain.LLMChain"
] | 
	[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n    , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'Authentication error: Ensure a valid API key is being used.',\n    should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n    'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key does not have access to your current model. Please use a different model.'\n    , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key exceeded your current quota, please check your plan and billing details.'\n    , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] | 
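The helpers in the code cell above wrap every langchain call with OpenAI-specific error translation. Below is a minimal, hypothetical usage sketch; it reuses only names imported in that module (`BaseChatModel`, `ModelSettings`) plus langchain's `PromptTemplate`, and the prompt text and function name are illustrative, not part of the original code.

```python
from langchain import PromptTemplate
from langchain.chat_models.base import BaseChatModel

from reworkd_platform.schemas.agent import ModelSettings


async def summarize(model: BaseChatModel, settings: ModelSettings, text: str) -> str:
    # call_model_with_handling (defined in the module above) builds the LLMChain
    # internally and funnels any OpenAI failure through openai_error_handler,
    # surfacing it as a user-facing OpenAIError.
    prompt = PromptTemplate.from_template("Summarize the following text:\n{text}")
    return await call_model_with_handling(
        model=model,
        prompt=prompt,
        args={"text": text},
        settings=settings,
    )
```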
| 
	import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies
from utils.gpt_interaction import GPTModel
from utils.prompts import SYSTEM
from utils.embeddings import EMBEDDINGS
from utils.gpt_interaction import get_gpt_responses
TOTAL_TOKENS = 0
TOTAL_PROMPTS_TOKENS = 0
TOTAL_COMPLETION_TOKENS = 0
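# Accumulate token usage across all GPT calls in this run and log a per-step summary.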
def log_usage(usage, generating_target, print_out=True):
    global TOTAL_TOKENS
    global TOTAL_PROMPTS_TOKENS
    global TOTAL_COMPLETION_TOKENS
    prompts_tokens = usage['prompt_tokens']
    completion_tokens = usage['completion_tokens']
    total_tokens = usage['total_tokens']
    TOTAL_TOKENS += total_tokens
    TOTAL_PROMPTS_TOKENS += prompts_tokens
    TOTAL_COMPLETION_TOKENS += completion_tokens
    message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \
              f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
              f"{TOTAL_TOKENS} tokens have been used in total."
    if print_out:
        print(message)
    logging.info(message)
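# Prepare a paper draft: copy the LaTeX template, collect references, gather optional domain knowledge, and propose paper components.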
def _generation_setup(title,  template="Default",
                      tldr=False, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,  # generating references
                      knowledge_database=None, max_tokens_kd=2048, query_counts=10):
    llm = GPTModel(model="gpt-3.5-turbo-16k")
    bibtex_path, destination_folder = copy_templates(template, title)
    logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))
    #generate key words
    keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
    log_usage(usage, "keywords")
    keywords = {keyword: max_kw_refs for keyword in keywords}
    print("Keywords: \n", keywords)
    #generate references
    ref = References(title, bib_refs)
    ref.collect_papers(keywords, tldr=tldr)
    references = ref.to_prompts(max_tokens=max_tokens_ref)
    all_paper_ids = ref.to_bibtex(bibtex_path)
    #produce domain knowledge
    prompts = f"Title: {title}"
    preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts)
    # check if the database exists or not
    db_path = f"utils/knowledge_databases/{knowledge_database}"
    db_config_path = os.path.join(db_path, "db_meta.json")
    db_index_path = os.path.join(db_path, "faiss_index")
    domain_knowledge = ""
    if os.path.isdir(db_path):
        try:
            with open(db_config_path, "r", encoding="utf-8") as f:
                db_config = json.load(f)
            model_name = db_config["embedding_model"]
            embeddings = EMBEDDINGS[model_name]
            db = FAISS.load_local(db_index_path, embeddings)
            knowledge = Knowledge(db=db)
            knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts)
            domain_knowledge = knowledge.to_prompts(max_tokens_kd)
        except Exception:
            domain_knowledge = ""
    prompts = f"Title: {title}"
    system_prompt = "You are an assistant designed to propose the necessary components of a survey paper. Your response should follow the JSON format."
    components, usage = llm(systems=system_prompt, prompts=prompts, return_json=True)
    log_usage(usage, "media")
    print(f"The paper information has been initialized. References are saved to {bibtex_path}.")
    paper = {}
    paper["title"] = title
    paper["references"] = references
    paper["bibtex"] = bibtex_path
    paper["components"] = components
    paper["domain_knowledge"] = domain_knowledge
    return paper, destination_folder, all_paper_ids
def section_generation(paper, section, save_to_path, model, research_field="machine learning"):
    """
    The main pipeline of generating a section.
        1. Generate prompts.
        2. Get responses from AI assistant.
        3. Extract the section text.
        4. Save the text to .tex file.
    :return usage
    """
    title = paper["title"]
    references = paper["references"]
    components = paper['components']
    instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.'
    fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n"
    instruction_subprompt = "\n" \
                            "Your response should follow the following instructions:\n" \
                            "{instruction}\n"
    ref_instruction_subprompt = "- Read references. " \
                                "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \
                                "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \
                                "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \
                                "- Avoid citing the same reference in a same paragraph.\n" \
                                "\n" \
                                "References:\n" \
                                "{references}"
    output_subprompt = "Ensure that it can be directly compiled by LeTaX."
    review_prompts = PromptTemplate(
        input_variables=["title", "components", "instruction", "section", "references"],
        template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt)
    prompts = review_prompts.format(title=title,
                                    components=components,
                                    instruction=instruction,
                                    section=section,
                                    references=references)
    SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
                                               template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX." )
    output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts,
                                      model=model, temperature=0.4)
    output=output[25:]
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    with open(tex_file, "w", encoding="utf-8") as f:
        f.write(output)
    use_md = True
    use_chinese = True
    if use_md:
        system_md = 'You are a translator between LaTeX and Markdown. Here is a LaTeX file whose content is: \n \n ' + output
        prompts_md = 'You should carefully convert the LaTeX content to Markdown format, paying attention to the correctness of the citation format (use the numbers). You should directly output the new content without any other reply. You should add the reference papers at the end of the paper, with line breaks between every two reference papers. The title should be ' + paper['title']
        output_md, usage_md = get_gpt_responses(system_md, prompts_md,
                                          model=model, temperature=0.4)
        md_file = os.path.join(save_to_path, f"{'survey'}.md")
        with open(md_file, "w", encoding="utf-8") as m:
            m.write(output_md)
        if use_chinese:
            system_md_chi = 'You are a translator between English and Chinese. Here is an English file whose content is: \n \n ' + output
            prompts_md_chi = 'You should translate the English into Chinese without changing anything else. You should directly output the new content without any other reply. You should keep the reference papers unchanged.'
            output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi,
                                                    model=model, temperature=0.4)
            md_file_chi = os.path.join(save_to_path, f"{'survey_chinese'}.md")
            with open(md_file_chi, "w", encoding="utf-8") as c:
                c.write(output_md_chi)
    return usage
def generate_draft(title,  tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,
                   knowledge_database=None, max_tokens_kd=2048, query_counts=10,
                   section='related works', model="gpt-3.5-turbo-16k", template="Default"
                   , save_zip=None):
    print("================START================")
    paper, destination_folder, _ = _generation_setup(title,  template, tldr, max_kw_refs, bib_refs,
                                                     max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd,
                                                     query_counts=query_counts,
                                                     knowledge_database=knowledge_database)
    # main components
    print(f"================PROCESSING================")
    usage = section_generation(paper, section, destination_folder, model=model)
    log_usage(usage, section)
    create_copies(destination_folder)
    print("\nPROCESSING COMPLETE\n")
    print("draft has been generated in " + destination_folder)
    return make_archive(destination_folder, title + ".zip")
if __name__ == "__main__":
    import openai
    openai.api_key = "your key"
    openai.api_base = 'https://api.openai.com/v1'
    
    #openai.proxy = "socks5h://localhost:7890 # if use the vpn
    target_title = "Reinforcement Learning for Robot Control"
    generate_draft(target_title, knowledge_database="ml_textbook_test",max_kw_refs=20)
 | 
	[
  "langchain.vectorstores.FAISS.load_local",
  "langchain.PromptTemplate"
] | 
	[((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n    output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n    'section', 'references'], template=fundamental_subprompt +\n    instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n    'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n    )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')] | 
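For orientation, here is a minimal, hypothetical sketch of the prompt-assembly step that `section_generation` in the cell above performs with langchain's `PromptTemplate`; the template text is abbreviated, and the title/section values are placeholders echoing the script's `__main__` example.

```python
from langchain import PromptTemplate

# Compose the per-section writing prompt from named slots, as section_generation does.
section_prompt = PromptTemplate(
    input_variables=["title", "section", "references"],
    template=(
        "Your task is to write the {section} section of the paper with the title '{title}'.\n"
        "References:\n{references}"
    ),
)
prompts = section_prompt.format(
    title="Reinforcement Learning for Robot Control",  # placeholder title
    section="related works",
    references="...",  # truncated reference block
)
```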
| 
	import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural'))
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
AUDIO_CHATGPT_PREFIX = """AudioGPT
AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files. 
AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated.
Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
TOOLS:
------
AudioGPT has access to the following tools:"""
AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists.
You will remember to provide the audio file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Thought: Do I need to use a tool? {agent_scratchpad}"""
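# These three templates are wired into initialize_agent() further below via agent_kwargs
# (prefix / format_instructions / suffix) and together form the system prompt of the
# conversational ReAct agent.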
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    else:
        paragraphs = history_memory.split('\n')
        last_n_tokens = n_tokens
        while last_n_tokens >= keep_last_n_words:
            last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
            paragraphs = paragraphs[1:]
        return '\n' + '\n'.join(paragraphs)
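# Illustrative behaviour: with keep_last_n_words=500, leading paragraphs are dropped one
# by one until the remaining history holds fewer than 500 whitespace-separated tokens,
# and the surviving paragraphs are returned joined by newlines.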
def merge_audio(audio_path_1, audio_path_2):
    merged_signal = []
    sr_1, signal_1 = wavfile.read(audio_path_1)
    sr_2, signal_2 = wavfile.read(audio_path_2)
    merged_signal.append(signal_1)
    merged_signal.append(signal_2)
    merged_signal = np.hstack(merged_signal)
    merged_signal = np.asarray(merged_signal, dtype=np.int16)
    audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
    wavfile.write(audio_filename, sr_2, merged_signal)
    return audio_filename
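# Note: merge_audio() concatenates the two clips back to back with np.hstack and writes
# the result at the sample rate of the second clip, so both inputs are assumed to share
# a single sample rate.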
class T2I:
    def __init__(self, device):
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from diffusers import StableDiffusionPipeline
        from transformers import pipeline
        print("Initializing T2I to %s" % device)
        self.device = device
        self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
        self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
        self.pipe.to(device)
    def inference(self, text):
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
        print(f'{text} refined to {refined_text}')
        image = self.pipe(refined_text).images[0]
        image.save(image_filename)
        print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
        return image_filename
class ImageCaptioning:
    def __init__(self, device):
        from transformers import BlipProcessor, BlipForConditionalGeneration
        print("Initializing ImageCaptioning to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        return captions
class T2A:
    def __init__(self, device):
        print("Initializing Make-An-Audio to %s" % device)
        self.device = device
        self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device)
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
    def _initialize_model(self, config, ckpt, device):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        model = model.to(device)
        model.cond_stage_model.to(model.device)
        model.cond_stage_model.device = model.device
        sampler = DDIMSampler(model)
        return sampler
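    # txt2audio() below draws `n_samples` mel spectrograms from the latent diffusion
    # model with classifier-free guidance (scale / uc), vocodes each with BigVGAN, and
    # select_best_audio() keeps the candidate that CLAP scores closest to the prompt.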
    def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
        SAMPLE_RATE = 16000
        prng = np.random.RandomState(seed)
        start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
        c = self.sampler.model.get_learned_conditioning(n_samples * [text])
        shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8]  # (z_dim, 80//2^x, 848//2^x)
        samples_ddim, _ = self.sampler.sample(S = ddim_steps,
                                            conditioning = c,
                                            batch_size = n_samples,
                                            shape = shape,
                                            verbose = False,
                                            unconditional_guidance_scale = scale,
                                            unconditional_conditioning = uc,
                                            x_T = start_code)
        x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
        x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
        wav_list = []
        for idx,spec in enumerate(x_samples_ddim):
            wav = self.vocoder.vocode(spec)
            wav_list.append((SAMPLE_RATE,wav))
        best_wav = self.select_best_audio(text, wav_list)
        return best_wav
    def select_best_audio(self, prompt, wav_list):
        from wav_evaluation.models.CLAPWrapper import CLAPWrapper
        clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml',
                                 use_cuda=torch.cuda.is_available())
        text_embeddings = clap_model.get_text_embeddings([prompt])
        score_list = []
        for data in wav_list:
            sr, wav = data
            audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True)
            score = clap_model.compute_similarity(audio_embeddings, text_embeddings,
                                                  use_logit_scale=False).squeeze().cpu().numpy()
            score_list.append(score)
        max_index = np.array(score_list).argmax()
        print(score_list, max_index)
        return wav_list[max_index]
    def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
        melbins,mel_len = 80,624
        with torch.no_grad():
            result = self.txt2audio(
                text = text,
                H = melbins,
                W = mel_len
            )
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed T2A.run, text: {text}, audio_filename: {audio_filename}")
        return audio_filename
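# Minimal usage sketch (illustrative; assumes the Make-An-Audio checkpoints referenced
# above are present and a CUDA device is available):
#   t2a = T2A(device="cuda:0")
#   wav_path = t2a.inference("a dog barking in the rain")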
class I2A:
    def __init__(self, device):
        print("Initializing Make-An-Audio-Image to %s" % device)
        self.device = device
        self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device)
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
    def _initialize_model(self, config, ckpt, device):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        model = model.to(device)
        model.cond_stage_model.to(model.device)
        model.cond_stage_model.device = model.device
        sampler = DDIMSampler(model)
        return sampler
    def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
        SAMPLE_RATE = 16000
        n_samples = 1 # only support 1 sample
        prng = np.random.RandomState(seed)
        start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
        #image = Image.fromarray(image)
        image = Image.open(image)
        image = self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0)
        image_embedding = self.sampler.model.cond_stage_model.forward_img(image)
        c = image_embedding.repeat(n_samples, 1, 1)
        shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8]  # (z_dim, 80//2^x, 848//2^x)
        samples_ddim, _ = self.sampler.sample(S=ddim_steps,
                                            conditioning=c,
                                            batch_size=n_samples,
                                            shape=shape,
                                            verbose=False,
                                            unconditional_guidance_scale=scale,
                                            unconditional_conditioning=uc,
                                            x_T=start_code)
        x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
        x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
        wav_list = []
        for idx,spec in enumerate(x_samples_ddim):
            wav = self.vocoder.vocode(spec)
            wav_list.append((SAMPLE_RATE,wav))
        best_wav = wav_list[0]
        return best_wav
    def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
        melbins,mel_len = 80,624
        with torch.no_grad():
            result = self.img2audio(
                image=image,
                H=melbins,
                W=mel_len
            )
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed I2A.run, image_filename: {image}, audio_filename: {audio_filename}")
        return audio_filename
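# I2A mirrors T2A but conditions the sampler on an image embedding produced by
# cond_stage_model.forward_img() instead of a text prompt, and returns the first decoded
# sample rather than re-ranking candidates with CLAP.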
class TTS:
    def __init__(self, device=None):
        from inference.tts.PortaSpeech import TTSInference
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing PortaSpeech to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/ps_adv_baseline'
        self.set_model_hparams()
        self.inferencer = TTSInference(self.hp, device)
    def set_model_hparams(self):
        set_hparams(exp_name=self.exp_name, print_hparams=False)
        self.hp = hp
    def inference(self, text):
        self.set_model_hparams()
        inp = {"text": text}
        out = self.inferencer.infer_once(inp)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, out, samplerate=22050)
        return audio_filename
class T2S:
    def __init__(self, device= None):
        from inference.svs.ds_e2e import DiffSingerE2EInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing DiffSinger to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/0831_opencpop_ds1000'
        self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml'
        self.set_model_hparams()
        self.pipe = DiffSingerE2EInfer(self.hp, device)
        self.default_inp = {
            'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
            'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
            'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
        }
    def set_model_hparams(self):
        set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
        self.hp = hp
    def inference(self, inputs):
        self.set_model_hparams()
        val = inputs.split(",")
        key = ['text', 'notes', 'notes_duration']
        try:
            inp = {k: v for k, v in zip(key, val)}
            wav = self.pipe.infer_once(inp)
        except Exception:
            print('Error occurs. Generate default audio sample.\n')
            inp = self.default_inp
            wav = self.pipe.infer_once(inp)
        #if inputs == '' or len(val) < len(key):
        #    inp = self.default_inp
        #else:
        #    inp = {k:v for k,v in zip(key,val)}
        #wav = self.pipe.infer_once(inp)
        wav *= 32767
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
        print(f"Processed T2S.run, audio_filename: {audio_filename}")
        return audio_filename
class t2s_VISinger:
    def __init__(self, device=None):
        from espnet2.bin.svs_inference import SingingGenerate
        from espnet2.utils.types import str_or_none  # needed for str_or_none(tag) below
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing VISingere to %s" % device)
        tag = 'AQuarterMile/opencpop_visinger1'
        self.model = SingingGenerate.from_pretrained(
            model_tag=str_or_none(tag),
            device=device,
        )
        phn_dur = [[0.        , 0.219     ],
            [0.219     , 0.50599998],
            [0.50599998, 0.71399999],
            [0.71399999, 1.097     ],
            [1.097     , 1.28799999],
            [1.28799999, 1.98300004],
            [1.98300004, 7.10500002],
            [7.10500002, 7.60400009]]
        phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP']
        score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']]
        tempo = 70
        tmp = {}
        tmp["label"] = phn_dur, phn
        tmp["score"] = tempo, score
        self.default_inp = tmp
    def inference(self, inputs):
        val = inputs.split(",")
        key = ['text', 'notes', 'notes_duration']
        try: # TODO: input will be updated
            inp = {k: v for k, v in zip(key, val)}
            wav = self.model(text=inp)["wav"]
        except Exception:
            print('Error occurs. Generate default audio sample.\n')
            inp = self.default_inp
            wav = self.model(text=inp)["wav"]
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, wav, samplerate=self.model.fs)
        return audio_filename
class TTS_OOD:
    def __init__(self, device):
        from inference.tts.GenerSpeech import GenerSpeechInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing GenerSpeech to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/GenerSpeech'
        self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml'
        self.set_model_hparams()
        self.pipe = GenerSpeechInfer(self.hp, device)
    def set_model_hparams(self):
        set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
        f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy'
        if os.path.exists(f0_stats_fn):
            hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn)
            hp['f0_mean'] = float(hp['f0_mean'])
            hp['f0_std'] = float(hp['f0_std'])
        hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt'
        self.hp = hp
    def inference(self, inputs):
        self.set_model_hparams()
        key = ['ref_audio', 'text']
        val = inputs.split(",")
        inp = {k: v for k, v in zip(key, val)}
        wav = self.pipe.infer_once(inp)
        wav *= 32767
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
        print(
            f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. Output Audio_filename: {audio_filename}")
        return audio_filename
class Inpaint:
    def __init__(self, device):
        print("Initializing Make-An-Audio-inpaint to %s" % device)
        self.device = device
        self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt')
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
        self.cmap_transform = matplotlib.cm.viridis
    def _initialize_model_inpaint(self, config, ckpt):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        model = model.to(device)
        print(model.device, device, model.cond_stage_model.device)
        sampler = DDIMSampler(model)
        return sampler
    def make_batch_sd(self, mel, mask, num_samples=1):
        mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32)
        mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32)
        masked_mel = (1 - mask) * mel
        mel = mel * 2 - 1
        mask = mask * 2 - 1
        masked_mel = masked_mel * 2 -1
        batch = {
             "mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
             "mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples),
             "masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
        }
        return batch
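    # make_batch_sd() rescales mel, mask and masked mel with x * 2 - 1 (mapping the
    # assumed [0, 1] range to [-1, 1]) and tiles them num_samples times before sampling.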
    def gen_mel(self, input_audio_path):
        SAMPLE_RATE = 16000
        sr, ori_wav = wavfile.read(input_audio_path)
        print("gen_mel")
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
        if len(ori_wav.shape)==2:# stereo
            ori_wav = librosa.to_mono(ori_wav.T)
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
        mel_len,hop_size = 848,256
        input_len = mel_len * hop_size
        if len(ori_wav) < input_len:
            input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
        else:
            input_wav = ori_wav[:input_len]
        mel = TRANSFORMS_16000(input_wav)
        return mel
    def gen_mel_audio(self, input_audio):
        SAMPLE_RATE = 16000
        sr,ori_wav = input_audio
        print("gen_mel_audio")
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
        if len(ori_wav.shape)==2:# stereo
            ori_wav = librosa.to_mono(ori_wav.T)
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
        mel_len,hop_size = 848,256
        input_len = mel_len * hop_size
        if len(ori_wav) < input_len:
            input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
        else:
            input_wav = ori_wav[:input_len]
        mel = TRANSFORMS_16000(input_wav)
        return mel
    def show_mel_fn(self, input_audio_path):
        crop_len = 500
        crop_mel = self.gen_mel(input_audio_path)[:,:crop_len]
        color_mel = self.cmap_transform(crop_mel)
        image = Image.fromarray((color_mel*255).astype(np.uint8))
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        image.save(image_filename)
        return image_filename
    def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
        model = self.sampler.model
        prng = np.random.RandomState(seed)
        start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
        cc = torch.nn.functional.interpolate(batch["mask"],
                                                size=c.shape[-2:])
        c = torch.cat((c, cc), dim=1) # (b,c+1,h,w) 1 is mask
        shape = (c.shape[1]-1,)+c.shape[2:]
        samples_ddim, _ = self.sampler.sample(S=ddim_steps,
                                            conditioning=c,
                                            batch_size=c.shape[0],
                                            shape=shape,
                                            verbose=False)
        x_samples_ddim = model.decode_first_stage(samples_ddim)
        mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0)
        mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0)
        predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0)
        inpainted = (1-mask)*mel+mask*predicted_mel
        inpainted = inpainted.cpu().numpy().squeeze()
        inpaint_wav = self.vocoder.vocode(inpainted)
        return inpainted, inpaint_wav
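    # The blended spectrogram keeps the original mel where mask == 0 and the model's
    # prediction where mask == 1, and is then vocoded back into a waveform.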
    def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100):
        SAMPLE_RATE = 16000
        torch.set_grad_enabled(False)
        mel_img = Image.open(mel_and_mask['image'])
        mask_img = Image.open(mel_and_mask["mask"])
        show_mel = np.array(mel_img.convert("L"))/255
        mask = np.array(mask_img.convert("L"))/255
        mel_bins,mel_len = 80,848
        input_mel = self.gen_mel_audio(input_audio)[:,:mel_len]
        mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0)
        print(mask.shape,input_mel.shape)
        with torch.no_grad():
            batch = self.make_batch_sd(input_mel,mask,num_samples=1)
            inpainted,gen_wav = self.inpaint(
                batch=batch,
                seed=seed,
                ddim_steps=ddim_steps,
                num_samples=1,
                H=mel_bins, W=mel_len
            )
        inpainted = inpainted[:,:show_mel.shape[1]]
        color_mel = self.cmap_transform(inpainted)
        input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
        gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
        image = Image.fromarray((color_mel*255).astype(np.uint8))
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        image.save(image_filename)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, gen_wav, samplerate = 16000)
        return image_filename, audio_filename
    
class ASR:
    def __init__(self, device):
        print("Initializing Whisper to %s" % device)
        self.device = device
        self.model = whisper.load_model("base", device=device)
    def inference(self, audio_path):
        audio = whisper.load_audio(audio_path)
        audio = whisper.pad_or_trim(audio)
        mel = whisper.log_mel_spectrogram(audio).to(self.device)
        _, probs = self.model.detect_language(mel)
        options = whisper.DecodingOptions()
        result = whisper.decode(self.model, mel, options)
        return result.text
    def translate_english(self, audio_path):
        audio = self.model.transcribe(audio_path, language='English')
        return audio['text']
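# Note: translate_english() relies on Whisper's transcribe() with language='English';
# Whisper also offers task='translate' for genuine speech translation, which is not
# used here.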
class A2T:
    def __init__(self, device):
        from audio_to_text.inference_waveform import AudioCapModel
        print("Initializing Audio-To-Text Model to %s" % device)
        self.device = device
        self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm")
    def inference(self, audio_path):
        audio = whisper.load_audio(audio_path)
        caption_text = self.model(audio)
        return caption_text[0]
class GeneFace:
    def __init__(self, device=None):
        print("Initializing GeneFace model to %s" % device)
        from audio_to_face.GeneFace_binding import GeneFaceInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.geneface_model = GeneFaceInfer(device)
        print("Loaded GeneFace model")
    def inference(self, audio_path):
        audio_base_name = os.path.basename(audio_path)[:-4]
        out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4")
        inp = {
            'audio_source_name': audio_path,
            'out_npy_name': f'geneface/tmp/{audio_base_name}.npy',
            'cond_name': f'geneface/tmp/{audio_base_name}.npy',
            'out_video_name': out_video_name,
            'tmp_imgs_dir': f'video/tmp_imgs',
        }
        self.geneface_model.infer_once(inp)
        return out_video_name
class SoundDetection:
    def __init__(self, device):
        self.device = device
        self.sample_rate = 32000
        self.window_size = 1024
        self.hop_size = 320
        self.mel_bins = 64
        self.fmin = 50
        self.fmax = 14000
        self.model_type = 'PVT'
        self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth'
        self.classes_num = detection_config.classes_num
        self.labels = detection_config.labels
        self.frames_per_second = self.sample_rate // self.hop_size
        # Model = eval(self.model_type)
        self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size, 
            hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax, 
            classes_num=self.classes_num)
        checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])
        self.model.to(device)
    def inference(self, audio_path):
        # Forward
        (waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True)
        waveform = waveform[None, :]    # (1, audio_length)
        waveform = torch.from_numpy(waveform)
        waveform = waveform.to(self.device)
        # Forward
        with torch.no_grad():
            self.model.eval()
            batch_output_dict = self.model(waveform, None)
        framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
        """(time_steps, classes_num)"""
        # print('Sound event detection result (time_steps x classes_num): {}'.format(
        #     framewise_output.shape))
        import numpy as np
        import matplotlib.pyplot as plt
        sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
        top_k = 10  # Show top results
        top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]    
        """(time_steps, top_k)"""
        # Plot result    
        stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size, 
            hop_length=self.hop_size, window='hann', center=True)
        frames_num = stft.shape[-1]
        fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
        axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
        axs[0].set_ylabel('Frequency bins')
        axs[0].set_title('Log spectrogram')
        axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
        axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second))
        axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second))
        axs[1].yaxis.set_ticks(np.arange(0, top_k))
        axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]])
        axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
        axs[1].set_xlabel('Seconds')
        axs[1].xaxis.set_ticks_position('bottom')
        plt.tight_layout()
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        plt.savefig(image_filename)
        return image_filename
class SoundExtraction:
    def __init__(self, device):
        from sound_extraction.model.LASSNet import LASSNet
        from sound_extraction.utils.stft import STFT
        import torch.nn as nn
        self.device = device
        self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt'
        self.stft = STFT()
        self.model = nn.DataParallel(LASSNet(device)).to(device)
        checkpoint = torch.load(self.model_file)
        self.model.load_state_dict(checkpoint['model'])
        self.model.eval()
    def inference(self, inputs):
        #key = ['ref_audio', 'text']
        from sound_extraction.utils.wav_io import load_wav, save_wav
        val = inputs.split(",")
        audio_path = val[0] # audio_path, text
        text = val[1]
        waveform = load_wav(audio_path)
        waveform = torch.tensor(waveform).transpose(1,0)
        mixed_mag, mixed_phase = self.stft.transform(waveform)
        text_query = ['[CLS] ' + text]
        mixed_mag = mixed_mag.transpose(2,1).unsqueeze(0).to(self.device)
        est_mask = self.model(mixed_mag, text_query)
        est_mag = est_mask * mixed_mag  
        est_mag = est_mag.squeeze(1)  
        est_mag = est_mag.permute(0, 2, 1) 
        est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase)
        est_wav = est_wav.squeeze(0).squeeze(0).numpy()  
        #est_path = f'output/est{i}.wav'
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        print('audio_filename ', audio_filename)
        save_wav(est_wav, audio_filename)
        return audio_filename
class Binaural:
    def __init__(self, device):
        from src.models import BinauralNetwork
        self.device = device
        self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net'
        self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions2.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions3.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions4.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions5.txt']
        self.net = BinauralNetwork(view_dim=7,
                      warpnet_layers=4,
                      warpnet_channels=64,
                      )
        self.net.load_from_file(self.model_file)
        self.sr = 48000
    def inference(self, audio_path):
        mono, sr  = librosa.load(path=audio_path, sr=self.sr, mono=True)
        mono = torch.from_numpy(mono)
        mono = mono.unsqueeze(0)
        import numpy as np
        import random
        rand_int = random.randint(0,4)
        view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32)
        view = torch.from_numpy(view)
        if not view.shape[-1] * 400 == mono.shape[-1]:
            mono = mono[:,:(mono.shape[-1]//400)*400] # 
            if view.shape[1]*400 > mono.shape[1]:
                m_a = view.shape[1] - mono.shape[-1]//400 
                rand_st = random.randint(0,m_a)
                view = view[:,m_a:m_a+(mono.shape[-1]//400)] # 
        # binauralize and save output
        self.net.eval().to(self.device)
        mono, view = mono.to(self.device), view.to(self.device)
        chunk_size = 48000  # forward in chunks of 1s
        rec_field =  1000  # add 1000 samples as "safe bet" since warping has undefined rec. field
        rec_field -= rec_field % 400  # make sure rec_field is a multiple of 400 to match audio and view frequencies
        chunks = [
            {
                "mono": mono[:, max(0, i-rec_field):i+chunk_size],
                "view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
            }
            for i in range(0, mono.shape[-1], chunk_size)
        ]
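        # Forward in 1 s chunks, each padded with ~1000 samples of left context
        # ("rec_field") so the warping network sees enough history; that context is
        # trimmed from every chunk after the first before concatenation.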
        for i, chunk in enumerate(chunks):
            with torch.no_grad():
                mono = chunk["mono"].unsqueeze(0)
                view = chunk["view"].unsqueeze(0)
                binaural = self.net(mono, view).squeeze(0)
                if i > 0:
                    binaural = binaural[:, -(mono.shape[-1]-rec_field):]
                chunk["binaural"] = binaural
        binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1)
        binaural = torch.clamp(binaural, min=-1, max=1).cpu()
        #binaural = chunked_forwarding(net, mono, view)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        import torchaudio
        torchaudio.save(audio_filename, binaural, sr)
        #soundfile.write(audio_filename, binaural, samplerate = 48000)
        print(f"Processed Binaural.run, audio_filename: {audio_filename}")
        return audio_filename
class TargetSoundDetection:
    def __init__(self, device):
        from target_sound_detection.src import models as tsd_models
        from target_sound_detection.src.models import event_labels
        self.device = device
        self.MEL_ARGS = {
            'n_mels': 64,
            'n_fft': 2048,
            'hop_length': int(22050 * 20 / 1000),
            'win_length': int(22050 * 40 / 1000)
        }
        self.EPS = np.spacing(1)
        self.clip_model, _ = clip.load("ViT-B/32", device=self.device)
        self.event_labels = event_labels
        self.id_to_event =  {i : label for i, label in enumerate(self.event_labels)}
        config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu')
        config_parameters = dict(config)
        config_parameters['tao'] = 0.6
        if 'thres' not in config_parameters.keys():
            config_parameters['thres'] = 0.5
        if 'time_resolution' not in config_parameters.keys():
            config_parameters['time_resolution'] = 125
        model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'
                                        , map_location=lambda storage, loc: storage) # load parameter 
        self.model = getattr(tsd_models, config_parameters['model'])(config_parameters,
                    inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args'])
        self.model.load_state_dict(model_parameters)
        self.model = self.model.to(self.device).eval()
        self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')
        self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')
    def extract_feature(self, fname):
        import soundfile as sf
        y, sr = sf.read(fname, dtype='float32')
        print('y ', y.shape)
        ti = y.shape[0]/sr
        if y.ndim > 1:
            y = y.mean(1)
        y = librosa.resample(y, sr, 22050)
        lms_feature = np.log(librosa.feature.melspectrogram(y, **self.MEL_ARGS) + self.EPS).T
        return lms_feature,ti
    
    def build_clip(self, text):
        text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"]
        text_features = self.clip_model.encode_text(text)
        return text_features
    
    def cal_similarity(self, target, retrievals):
        ans = []
        #target =torch.from_numpy(target)
        for name in retrievals.keys():
            tmp = retrievals[name]
            #tmp = torch.from_numpy(tmp)
            s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0)
            ans.append(s.item())
        return ans.index(max(ans))
    
    def inference(self, text, audio_path):
        from target_sound_detection.src.utils import median_filter, decode_with_timestamps
        target_emb = self.build_clip(text) # torch type
        idx = self.cal_similarity(target_emb, self.re_embeds)
        target_event = self.id_to_event[idx]
        embedding = self.ref_mel[target_event]
        embedding = torch.from_numpy(embedding)
        embedding = embedding.unsqueeze(0).to(self.device).float()
        #print('embedding ', embedding.shape)
        inputs,ti = self.extract_feature(audio_path)
        #print('ti ', ti)
        inputs = torch.from_numpy(inputs)
        inputs = inputs.unsqueeze(0).to(self.device).float()
        #print('inputs ', inputs.shape)
        decision, decision_up, logit = self.model(inputs, embedding)
        pred = decision_up.detach().cpu().numpy()
        pred = pred[:,:,0]
        frame_num = decision_up.shape[1]
        time_ratio = ti / frame_num
        filtered_pred = median_filter(pred, window_size=1, threshold=0.5)
        #print('filtered_pred ', filtered_pred)
        time_predictions = []
        for index_k in range(filtered_pred.shape[0]):
            decoded_pred = []
            decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:])
            if len(decoded_pred_) == 0: # neg deal
                decoded_pred_.append((target_event, 0, 0))
            decoded_pred.append(decoded_pred_)
            for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
                cur_pred = pred[num_batch]
                # Save each frame output, for later visualization
                label_prediction = decoded_pred[num_batch] # frame predict
                # print(label_prediction)
                for event_label, onset, offset in label_prediction:
                    time_predictions.append({
                        'onset': onset*time_ratio,
                        'offset': offset*time_ratio,})
        ans = ''
        for i,item in enumerate(time_predictions):
            ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + '  end_time: ' + str(item['offset']) + '\t'
        #print(ans)
        return ans
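# Minimal usage sketch (illustrative; the audio path is hypothetical): the query text is
# matched to the closest known event label via CLIP similarity, the detector is run with
# that event's reference mel embedding, and detected segments are returned as a string:
#   tsd = TargetSoundDetection(device="cuda:0")
#   segments = tsd.inference("dog barking", "audio/example.wav")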
# class Speech_Enh_SS_SC:
#     """Speech Enhancement or Separation in single-channel
#     Example usage:
#         enh_model = Speech_Enh_SS("cuda")
#         enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
#     """
#     def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"):
#         self.model_name = model_name
#         self.device = device
#         print("Initializing ESPnet Enh to %s" % device)
#         self._initialize_model()
#     def _initialize_model(self):
#         from espnet_model_zoo.downloader import ModelDownloader
#         from espnet2.bin.enh_inference import SeparateSpeech
#         d = ModelDownloader()
#         cfg = d.download_and_unpack(self.model_name)
#         self.separate_speech = SeparateSpeech(
#             train_config=cfg["train_config"],
#             model_file=cfg["model_file"],
#             # for segment-wise process on long speech
#             segment_size=2.4,
#             hop_size=0.8,
#             normalize_segment_scale=False,
#             show_progressbar=True,
#             ref_channel=None,
#             normalize_output_wav=True,
#             device=self.device,
#         )
#     def inference(self, speech_path, ref_channel=0):
#         speech, sr = soundfile.read(speech_path)
#         speech = speech[:, ref_channel]
#         assert speech.dim() == 1
#         enh_speech = self.separate_speech(speech[None, ], fs=sr)
#         if len(enh_speech) == 1:
#             return enh_speech[0]
#         return enh_speech
# class Speech_Enh_SS_MC:
#     """Speech Enhancement or Separation in multi-channel"""
#     def __init__(self, device="cuda", model_name=None, ref_channel=4):
#         self.model_name = model_name
#         self.ref_channel = ref_channel
#         self.device = device
#         print("Initializing ESPnet Enh to %s" % device)
#         self._initialize_model()
#     def _initialize_model(self):
#         from espnet_model_zoo.downloader import ModelDownloader
#         from espnet2.bin.enh_inference import SeparateSpeech
#         d = ModelDownloader()
#         cfg = d.download_and_unpack(self.model_name)
#         self.separate_speech = SeparateSpeech(
#             train_config=cfg["train_config"],
#             model_file=cfg["model_file"],
#             # for segment-wise process on long speech
#             segment_size=2.4,
#             hop_size=0.8,
#             normalize_segment_scale=False,
#             show_progressbar=True,
#             ref_channel=self.ref_channel,
#             normalize_output_wav=True,
#             device=self.device,
#         )
#     def inference(self, speech_path):
#         speech, sr = soundfile.read(speech_path)
#         speech = speech.T
#         enh_speech = self.separate_speech(speech[None, ...], fs=sr)
#         if len(enh_speech) == 1:
#             return enh_speech[0]
#         return enh_speech
class Speech_Enh_SS_SC:
    """Speech Enhancement or Separation in single-channel
    Example usage:
        enh_model = Speech_Enh_SS("cuda")
        enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
    """
    def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"):
        self.model_name = model_name
        self.device = device
        print("Initializing ESPnet Enh to %s" % device)
        self._initialize_model()
    def _initialize_model(self):
        from espnet_model_zoo.downloader import ModelDownloader
        from espnet2.bin.enh_inference import SeparateSpeech
        d = ModelDownloader()
        cfg = d.download_and_unpack(self.model_name)
        self.separate_speech = SeparateSpeech(
            train_config=cfg["train_config"],
            model_file=cfg["model_file"],
            # for segment-wise process on long speech
            segment_size=2.4,
            hop_size=0.8,
            normalize_segment_scale=False,
            show_progressbar=True,
            ref_channel=None,
            normalize_output_wav=True,
            device=self.device,
        )
    def inference(self, speech_path, ref_channel=0):
        speech, sr = soundfile.read(speech_path)
        speech = speech[:, ref_channel]
        # speech = torch.from_numpy(speech)
        # assert speech.dim() == 1
        enh_speech = self.separate_speech(speech[None, ...], fs=sr)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        # if len(enh_speech) == 1:
        soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr)
            # return enh_speech[0]
        # return enh_speech
        # else: 
        #     print("############")
        #     audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        #     soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
        #     audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        #     soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
        #     audio_filename = merge_audio(audio_filename_1, audio_filename_2)
        return audio_filename
class Speech_SS:
    def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"):
        self.model_name = model_name
        self.device = device
        print("Initializing ESPnet SS to %s" % device)
        self._initialize_model()
    def _initialize_model(self):
        from espnet_model_zoo.downloader import ModelDownloader
        from espnet2.bin.enh_inference import SeparateSpeech
        d = ModelDownloader()
        cfg = d.download_and_unpack(self.model_name)
        self.separate_speech = SeparateSpeech(
            train_config=cfg["train_config"],
            model_file=cfg["model_file"],
            # for segment-wise process on long speech
            segment_size=2.4,
            hop_size=0.8,
            normalize_segment_scale=False,
            show_progressbar=True,
            ref_channel=None,
            normalize_output_wav=True,
            device=self.device,
        )
    def inference(self, speech_path):
        speech, sr = soundfile.read(speech_path)
        enh_speech = self.separate_speech(speech[None, ...], fs=sr)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        if len(enh_speech) == 1:
            soundfile.write(audio_filename, enh_speech[0], samplerate=sr)
        else:
            # print("############")
            audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
            audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
            audio_filename = merge_audio(audio_filename_1, audio_filename_2)
        return audio_filename
class ConversationBot:
    def __init__(self):
        print("Initializing AudioGPT")
        self.llm = OpenAI(temperature=0)
        self.t2i = T2I(device="cuda:1")
        self.i2t = ImageCaptioning(device="cuda:0")
        self.t2a = T2A(device="cuda:0")
        self.tts = TTS(device="cpu")
        self.t2s = T2S(device="cpu")
        self.i2a = I2A(device="cuda:0")
        self.a2t = A2T(device="cpu")
        self.asr = ASR(device="cuda:0")
        self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0")
        # self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0")
        self.SS = Speech_SS(device="cuda:0")
        self.inpaint = Inpaint(device="cuda:0")
        self.tts_ood = TTS_OOD(device="cpu")
        self.geneface = GeneFace(device="cuda:0")
        self.detection = SoundDetection(device="cpu")
        self.binaural = Binaural(device="cuda:0")
        self.extraction = SoundExtraction(device="cuda:0")
        self.TSD = TargetSoundDetection(device="cuda:0")
        self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
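    # The buffer memory above fills the {chat_history} slot of AUDIO_CHATGPT_SUFFIX on
    # each turn; init_tools() below registers the task wrappers as LangChain Tools and
    # builds a conversational ReAct agent over them.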
    def init_tools(self, interaction_type):
        if interaction_type == 'text':
            self.tools = [
                Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
                     description="useful for when you want to generate an image from a user input text and save it to a file. Like: generate an image of an object or something, or generate an image that includes some objects. "
                                 "The input to this tool should be a string, representing the text used to generate image. "),
                Tool(name="Get Photo Description", func=self.i2t.inference,
                     description="useful for when you want to know what is inside the photo. receives image_path as input. "
                                 "The input to this tool should be a string, representing the image_path. "),
                Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                     description="useful for when you want to generate an audio from a user input text and save it to a file."
                                 "The input to this tool should be a string, representing the text used to generate audio."),
                Tool(
                    name="Style Transfer", func= self.tts_ood.inference,
                    description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                                "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                                "The input to this tool should be a comma separated string of two, representing reference audio path and input text."),
                Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                     description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                                 "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence."
                                 "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                                 "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                                 "The input to this tool should be a comma separated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
                Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                     description="useful for when you want to convert a user input text into speech audio and save it to a file."
                                 "The input to this tool should be a string, representing the text used to be converted to speech."),
                # Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference,
                #      description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), "
                #                  "or separate each speech from the speech mixture (single-channel), receives audio_path as input."
                #                  "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference,
                     description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Speech Separation In Single-Channel", func=self.SS.inference,
                     description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                # Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference,
                #      description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input."
                #                  "The input to this tool should be a string, representing the audio_path."),                                 
                Tool(name="Generate Audio From The Image", func=self.i2a.inference,
                     description="useful for when you want to generate an audio based on an image."
                                  "The input to this tool should be a string, representing the image_path. "),
                Tool(name="Generate Text From The Audio", func=self.a2t.inference,
                     description="useful for when you want to describe an audio in text, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn,
                     description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, "
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Transcribe Speech", func=self.asr.inference,
                     description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                     description="useful for when you want to generate a talking human portrait video given a input audio."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference,
                     description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Synthesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
                     description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                     description="useful for when you want to extract a target sound from a mixture audio; you can describe the target sound by text, receives audio_path and text as input. "
                                 "The input to this tool should be a comma separated string of two, representing mixture audio path and input text."),
                Tool(name="Target Sound Detection", func=self.TSD.inference,
                     description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                                 "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]       
            self.agent = initialize_agent(
                self.tools,
                self.llm,
                agent="conversational-react-description",
                verbose=True,
                memory=self.memory,
                return_intermediate_steps=True,
                agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
            return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
        else:
            self.tools = [
                Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                     description="useful for when you want to generate an audio from a user input text and it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to generate audio."),
                Tool(
                    name="Style Transfer", func= self.tts_ood.inference,
                    description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                                "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                                "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
                Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                     description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                                 "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
                                 "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                                 "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                                 "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
                Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                     description="useful for when you want to convert a user input text into speech audio it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to be converted to speech."),
                Tool(name="Generate Text From The Audio", func=self.a2t.inference,
                     description="useful for when you want to describe an audio in text, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                     description="useful for when you want to generate a talking human portrait video given a input audio."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
                     description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                     description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
                                 "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
                Tool(name="Target Sound Detection", func=self.TSD.inference,
                     description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                                 "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]                
            self.agent = initialize_agent(
                self.tools,
                self.llm,
                agent="conversational-react-description",
                verbose=True,
                memory=self.memory,
                return_intermediate_steps=True,
                agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
            return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
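    # Handle a text query: trim the conversation buffer, run the agent, then show or hide
    # the audio/video/image widgets depending on which tool produced the result.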
    def run_text(self, text, state):
        print("===============Running run_text =============")
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        if res['intermediate_steps'] == []:
            print("======>Current memory:\n %s" % self.agent.memory)
            response = res['output']
            state = state + [(text, response)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
        else:
            tool = res['intermediate_steps'][0][0].tool
            if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
                print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
            elif tool == "Transcribe Speech":
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
            elif tool == "Detect The Sound Event From The Audio":
                image_filename = res['intermediate_steps'][0][1]
                response = res['output'] + f"*{image_filename}*"
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)       
            elif tool == "Audio Inpainting":
                audio_filename = res['intermediate_steps'][0][0].tool_input
                image_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output'] 
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            state = state + [(text, response)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
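    # Handle an uploaded file: caption .wav uploads with the audio-to-text model and images
    # with the image captioner (after resizing to at most 512 px), then inject the caption
    # into the agent memory as a Human/AI exchange so later tools can refer to the file.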
    def run_image_or_audio(self, file, state, txt):
        file_type = file.name[-3:]
        if file_type == "wav":
            print("===============Running run_audio =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            # audio_load = whisper.load_audio(file.name)
            audio_load, sr = soundfile.read(file.name)
            soundfile.write(audio_filename, audio_load, samplerate = sr)
            description = self.a2t.inference(audio_filename)
            Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
            AI_prompt = "Received.  "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            #state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
            state = state + [(f"*{audio_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
        else:
            print("===============Running run_image =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
            print("======>Auto Resize Image...")
            img = Image.open(file.name)
            width, height = img.size
            ratio = min(512 / width, 512 / height)
            width_new, height_new = (round(width * ratio), round(height * ratio))
            img = img.resize((width_new, height_new))
            img = img.convert('RGB')
            img.save(image_filename, "PNG")
            print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
            description = self.i2t.inference(image_filename)
            Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
            AI_prompt = "Received.  "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            state = state + [(f"*{image_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)
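    # Speech interaction: translate the recorded input to English text, run the agent,
    # and return a synthesized spoken reply alongside any generated media.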
    def speech(self, speech_input, state):
        input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        text = self.asr.translate_english(speech_input)
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        if res['intermediate_steps'] == []:
            print("======>Current memory:\n %s" % self.agent.memory)
            response = res['output']
            output_audio_filename = self.tts.inference(response)
            state = state + [(text, response)]
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
        else:
            tool = res['intermediate_steps'][0][0].tool
            if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
                print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Transcribe Speech":
                print("======>Current memory:\n %s" % self.agent.memory)
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Detect The Sound Event From The Audio":
                print("======>Current memory:\n %s" % self.agent.memory)
                image_filename = res['intermediate_steps'][0][1]
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output'] + f"*{image_filename}*"
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)   
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output'] 
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            Res = "The audio file has been generated and the audio is "
            output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
            print(output_audio_filename)
            state = state + [(text, response)]
            response = res['output'] 
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
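    # Audio inpainting callback: predict the masked region of the mel spectrogram, write the
    # restored audio to disk, and announce the result through TTS.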
    def inpainting(self, state, audio_filename, image_filename):
        print("===============Running inpainting =============")
        print("Inputs:", state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)       
        AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"*{new_image_filename}*"
        output_audio_filename = self.tts.inference(AI_prompt)
        self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
        print("======>Current memory:\n %s" % self.agent.memory)
        state = state + [(f"Audio Inpainting", AI_prompt)]
        print("Outputs:", state)
        return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)
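    # Small helpers used by the Clear buttons to reset individual Gradio components.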
    def clear_audio(self):
        return gr.Audio.update(value=None, visible=False)
    def clear_input_audio(self):
        return gr.Audio.update(value=None)
    def clear_image(self):
        return gr.Image.update(value=None, visible=False)
    def clear_video(self):
        return gr.Video.update(value=None, visible=False)
    def clear_button(self):
        return gr.Button.update(visible=False)
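# Gradio front end: the user first selects text or speech interaction, which decides the visible
# widgets; chat, upload, inpainting and microphone events are wired to the bot callbacks below.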
if __name__ == '__main__':
    bot = ConversationBot()
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        with gr.Row():
            gr.Markdown("## AudioGPT")
        chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False) 
        state = gr.State([])
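        # Interaction-type selector shown first; the chosen mode reveals either the text or the speech input row.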
        with gr.Row() as select_raws:
            with gr.Column(scale=0.7):
                interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
            with gr.Column(scale=0.3, min_width=0):
                select = gr.Button("Select")
        
        with gr.Row(visible=False) as text_input_raws:
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
            with gr.Column(scale=0.1, min_width=0):
                run = gr.Button("🏃♂️Run")
            with gr.Column(scale=0.1, min_width=0):
                clear_txt = gr.Button("🔄Clear️")
            with gr.Column(scale=0.1, min_width=0):
                btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
        with gr.Row():
            outaudio = gr.Audio(visible=False)
        with gr.Row():
            with gr.Column(scale=0.3, min_width=0):
                outvideo = gr.Video(visible=False)
        with gr.Row():
            show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
        with gr.Row():
            run_button = gr.Button("Predict Masked Place",visible=False)        
        with gr.Row(visible=False) as speech_input_raws: 
            with gr.Column(scale=0.7):
                speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
            with gr.Column(scale=0.15, min_width=0):
                submit_btn = gr.Button("🏃♂️Submit")
            with gr.Column(scale=0.15, min_width=0):
                clear_speech = gr.Button("🔄Clear️")
            with gr.Row():
                speech_output = gr.Audio(label="Output",visible=False)
        select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
        txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        txt.submit(lambda: "", None, txt)
        run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        run.click(lambda: "", None, txt)
        btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
        run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
        clear_txt.click(bot.memory.clear)
        clear_txt.click(lambda: [], None, chatbot)
        clear_txt.click(lambda: [], None, state)
        clear_txt.click(lambda:None, None, txt)
        clear_txt.click(bot.clear_button, None, run_button)
        clear_txt.click(bot.clear_image, None, show_mel)
        clear_txt.click(bot.clear_audio, None, outaudio)
        clear_txt.click(bot.clear_video, None, outvideo)
        submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
        clear_speech.click(bot.clear_input_audio, None, speech_input)
        clear_speech.click(bot.clear_audio, None, speech_output)
        clear_speech.click(lambda: [], None, state)
        clear_speech.click(bot.clear_video, None, outvideo)
        demo.launch(server_name="0.0.0.0", server_port=7860, share=True) | 
	[
  "langchain.llms.openai.OpenAI",
  "langchain.agents.tools.Tool",
  "langchain.chains.conversation.memory.ConversationBufferMemory",
  "langchain.agents.initialize.initialize_agent"
] | 
	[((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'), ((4131, 4155), 'numpy.hstack', 'np.hstack', (['merged_signal'], {}), '(merged_signal)\n', (4140, 4155), True, 'import numpy as np\n'), ((4176, 4217), 'numpy.asarray', 'np.asarray', (['merged_signal'], {'dtype': 'np.int16'}), '(merged_signal, dtype=np.int16)\n', (4186, 4217), True, 'import numpy as np\n'), ((4298, 4348), 'scipy.io.wavfile.write', 'wavfile.write', (['audio_filename', 'sr_2', 'merged_signal'], {}), '(audio_filename, sr_2, merged_signal)\n', (4311, 4348), True, 'import scipy.io.wavfile as wavfile\n'), ((53, 79), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((4682, 4786), 'diffusers.StableDiffusionPipeline.from_pretrained', 'StableDiffusionPipeline.from_pretrained', (['"""runwayml/stable-diffusion-v1-5"""'], {'torch_dtype': 'torch.float16'}), "('runwayml/stable-diffusion-v1-5',\n    torch_dtype=torch.float16)\n", (4721, 4786), False, 'from diffusers import StableDiffusionPipeline\n'), ((4820, 4892), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4849, 4892), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((4926, 5005), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4962, 5005), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((5043, 5163), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n    text_refine_tokenizer, device=self.device)\n", (5051, 5163), False, 'from transformers import pipeline\n'), ((5875, 5945), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (5904, 5945), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6704, 6792), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (6718, 6792), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((6861, 6883), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (6875, 6883), False, 'from omegaconf import OmegaConf\n'), ((6900, 6937), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (6923, 6937), False, 'from ldm.util import instantiate_from_config\n'), ((7187, 7205), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (7198, 7205), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((7378, 7405), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7399, 7405), True, 'import numpy as np\n'), ((8501, 8560), 
'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (8512, 8560), False, 'import torch\n'), ((10157, 10217), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (10172, 10217), False, 'import soundfile\n'), ((10695, 10783), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (10709, 10783), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((10852, 10874), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (10866, 10874), False, 'from omegaconf import OmegaConf\n'), ((10891, 10928), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (10914, 10928), False, 'from ldm.util import instantiate_from_config\n'), ((11178, 11196), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (11189, 11196), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((11399, 11426), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (11420, 11426), True, 'import numpy as np\n'), ((11759, 11776), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (11769, 11776), False, 'from PIL import Image\n'), ((12720, 12779), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (12731, 12779), False, 'import torch\n'), ((13377, 13437), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (13392, 13437), False, 'import soundfile\n'), ((13967, 13996), 'inference.tts.PortaSpeech.TTSInference', 'TTSInference', (['self.hp', 'device'], {}), '(self.hp, device)\n', (13979, 13996), False, 'from inference.tts.PortaSpeech import TTSInference\n'), ((14039, 14095), 'utils.hparams.set_hparams', 'set_hparams', ([], {'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(exp_name=self.exp_name, print_hparams=False)\n', (14050, 14095), False, 'from utils.hparams import set_hparams\n'), ((14345, 14399), 'soundfile.write', 'soundfile.write', (['audio_filename', 'out'], {'samplerate': '(22050)'}), '(audio_filename, out, samplerate=22050)\n', (14360, 14399), False, 'import soundfile\n'), ((14913, 14948), 'inference.svs.ds_e2e.DiffSingerE2EInfer', 'DiffSingerE2EInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (14931, 14948), False, 'from inference.svs.ds_e2e import DiffSingerE2EInfer\n'), ((15398, 15474), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (15409, 15474), False, 'from utils.hparams import set_hparams\n'), ((18112, 18174), 'soundfile.write', 'soundfile.write', (['audio_filename', 'wav'], {'samplerate': 'self.model.fs'}), '(audio_filename, wav, samplerate=self.model.fs)\n', (18127, 18174), False, 'import soundfile\n'), ((18678, 18711), 'inference.tts.GenerSpeech.GenerSpeechInfer', 'GenerSpeechInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (18694, 18711), False, 'from inference.tts.GenerSpeech import 
GenerSpeechInfer\n'), ((18754, 18830), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (18765, 18830), False, 'from utils.hparams import set_hparams\n'), ((18914, 18941), 'os.path.exists', 'os.path.exists', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18928, 18941), False, 'import os\n'), ((20145, 20233), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (20159, 20233), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((20354, 20376), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (20368, 20376), False, 'from omegaconf import OmegaConf\n'), ((20393, 20430), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (20416, 20430), False, 'from ldm.util import instantiate_from_config\n'), ((20737, 20755), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (20748, 20755), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((21544, 21574), 'scipy.io.wavfile.read', 'wavfile.read', (['input_audio_path'], {}), '(input_audio_path)\n', (21556, 21574), True, 'import scipy.io.wavfile as wavfile\n'), ((21855, 21915), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (21871, 21915), False, 'import librosa\n'), ((22182, 22209), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22198, 22209), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((22619, 22679), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (22635, 22679), False, 'import librosa\n'), ((22945, 22972), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22961, 22972), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((23512, 23539), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (23533, 23539), True, 'import numpy as np\n'), ((23834, 23899), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["batch['mask']"], {'size': 'c.shape[-2:]'}), "(batch['mask'], size=c.shape[-2:])\n", (23865, 23899), False, 'import torch\n'), ((23960, 23985), 'torch.cat', 'torch.cat', (['(c, cc)'], {'dim': '(1)'}), '((c, cc), dim=1)\n', (23969, 23985), False, 'import torch\n'), ((24438, 24495), 'torch.clamp', 'torch.clamp', (["((batch['mel'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mel'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24449, 24495), False, 'import torch\n'), ((24506, 24564), 'torch.clamp', 'torch.clamp', (["((batch['mask'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mask'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24517, 24564), False, 'import torch\n'), ((24584, 24643), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (24595, 24643), False, 'import torch\n'), ((24954, 24983), 'torch.set_grad_enabled', 'torch.set_grad_enabled', 
(['(False)'], {}), '(False)\n', (24976, 24983), False, 'import torch\n'), ((25002, 25035), 'PIL.Image.open', 'Image.open', (["mel_and_mask['image']"], {}), "(mel_and_mask['image'])\n", (25012, 25035), False, 'from PIL import Image\n'), ((25055, 25087), 'PIL.Image.open', 'Image.open', (["mel_and_mask['mask']"], {}), "(mel_and_mask['mask'])\n", (25065, 25087), False, 'from PIL import Image\n'), ((25306, 25398), 'numpy.pad', 'np.pad', (['mask', '((0, 0), (0, mel_len - mask.shape[1]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mask, ((0, 0), (0, mel_len - mask.shape[1])), mode='constant',\n    constant_values=0)\n", (25312, 25398), True, 'import numpy as np\n'), ((26269, 26327), 'soundfile.write', 'soundfile.write', (['audio_filename', 'gen_wav'], {'samplerate': '(16000)'}), '(audio_filename, gen_wav, samplerate=16000)\n', (26284, 26327), False, 'import soundfile\n'), ((26527, 26568), 'whisper.load_model', 'whisper.load_model', (['"""base"""'], {'device': 'device'}), "('base', device=device)\n", (26545, 26568), False, 'import whisper\n'), ((26623, 26653), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (26641, 26653), False, 'import whisper\n'), ((26670, 26696), 'whisper.pad_or_trim', 'whisper.pad_or_trim', (['audio'], {}), '(audio)\n', (26689, 26696), False, 'import whisper\n'), ((26831, 26856), 'whisper.DecodingOptions', 'whisper.DecodingOptions', ([], {}), '()\n', (26854, 26856), False, 'import whisper\n'), ((26874, 26914), 'whisper.decode', 'whisper.decode', (['self.model', 'mel', 'options'], {}), '(self.model, mel, options)\n', (26888, 26914), False, 'import whisper\n'), ((27312, 27373), 'audio_to_text.inference_waveform.AudioCapModel', 'AudioCapModel', (['"""audio_to_text/audiocaps_cntrstv_cnn14rnn_trm"""'], {}), "('audio_to_text/audiocaps_cntrstv_cnn14rnn_trm')\n", (27325, 27373), False, 'from audio_to_text.inference_waveform import AudioCapModel\n'), ((27427, 27457), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (27445, 27457), False, 'import whisper\n'), ((27863, 27884), 'audio_to_face.GeneFace_binding.GeneFaceInfer', 'GeneFaceInfer', (['device'], {}), '(device)\n', (27876, 27884), False, 'from audio_to_face.GeneFace_binding import GeneFaceInfer\n'), ((29085, 29267), 'audio_infer.pytorch.models.PVT', 'PVT', ([], {'sample_rate': 'self.sample_rate', 'window_size': 'self.window_size', 'hop_size': 'self.hop_size', 'mel_bins': 'self.mel_bins', 'fmin': 'self.fmin', 'fmax': 'self.fmax', 'classes_num': 'self.classes_num'}), '(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=\n    self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,\n    classes_num=self.classes_num)\n', (29088, 29267), False, 'from audio_infer.pytorch.models import PVT\n'), ((29306, 29364), 'torch.load', 'torch.load', (['self.checkpoint_path'], {'map_location': 'self.device'}), '(self.checkpoint_path, map_location=self.device)\n', (29316, 29364), False, 'import torch\n'), ((29531, 29592), 'librosa.core.load', 'librosa.core.load', (['audio_path'], {'sr': 'self.sample_rate', 'mono': '(True)'}), '(audio_path, sr=self.sample_rate, mono=True)\n', (29548, 29592), False, 'import librosa\n'), ((29672, 29698), 'torch.from_numpy', 'torch.from_numpy', (['waveform'], {}), '(waveform)\n', (29688, 29698), False, 'import torch\n'), ((30663, 30711), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 4)'}), '(2, 1, sharex=True, figsize=(10, 4))\n', (30675, 30711), 
True, 'import matplotlib.pyplot as plt\n'), ((31471, 31489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31487, 31489), True, 'import matplotlib.pyplot as plt\n'), ((31578, 31605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_filename'], {}), '(image_filename)\n', (31589, 31605), True, 'import matplotlib.pyplot as plt\n'), ((31952, 31958), 'sound_extraction.utils.stft.STFT', 'STFT', ([], {}), '()\n', (31956, 31958), False, 'from sound_extraction.utils.stft import STFT\n'), ((32046, 32073), 'torch.load', 'torch.load', (['self.model_file'], {}), '(self.model_file)\n', (32056, 32073), False, 'import torch\n'), ((32416, 32436), 'sound_extraction.utils.wav_io.load_wav', 'load_wav', (['audio_path'], {}), '(audio_path)\n', (32424, 32436), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33156, 33189), 'sound_extraction.utils.wav_io.save_wav', 'save_wav', (['est_wav', 'audio_filename'], {}), '(est_wav, audio_filename)\n', (33164, 33189), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33854, 33920), 'src.models.BinauralNetwork', 'BinauralNetwork', ([], {'view_dim': '(7)', 'warpnet_layers': '(4)', 'warpnet_channels': '(64)'}), '(view_dim=7, warpnet_layers=4, warpnet_channels=64)\n', (33869, 33920), False, 'from src.models import BinauralNetwork\n'), ((34119, 34171), 'librosa.load', 'librosa.load', ([], {'path': 'audio_path', 'sr': 'self.sr', 'mono': '(True)'}), '(path=audio_path, sr=self.sr, mono=True)\n', (34131, 34171), False, 'import librosa\n'), ((34187, 34209), 'torch.from_numpy', 'torch.from_numpy', (['mono'], {}), '(mono)\n', (34203, 34209), False, 'import torch\n'), ((34311, 34331), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (34325, 34331), False, 'import random\n'), ((34433, 34455), 'torch.from_numpy', 'torch.from_numpy', (['view'], {}), '(view)\n', (34449, 34455), False, 'import torch\n'), ((35860, 35918), 'torch.cat', 'torch.cat', (["[chunk['binaural'] for chunk in chunks]"], {'dim': '(-1)'}), "([chunk['binaural'] for chunk in chunks], dim=-1)\n", (35869, 35918), False, 'import torch\n'), ((36151, 36196), 'torchaudio.save', 'torchaudio.save', (['audio_filename', 'binaural', 'sr'], {}), '(audio_filename, binaural, sr)\n', (36166, 36196), False, 'import torchaudio\n'), ((36806, 36819), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (36816, 36819), True, 'import numpy as np\n'), ((36849, 36890), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'self.device'}), "('ViT-B/32', device=self.device)\n", (36858, 36890), False, 'import clip\n'), ((37034, 37147), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth"""'], {'map_location': '"""cpu"""'}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth',\n    map_location='cpu')\n", (37044, 37147), False, 'import torch\n'), ((37460, 37610), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'\n    , map_location=lambda storage, loc: storage)\n", (37470, 37610), False, 'import torch\n'), ((38016, 38103), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth"""'], {}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')\n", (38026, 38103), False, 
'import torch\n'), ((38122, 38208), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth"""'], {}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')\n", (38132, 38208), False, 'import torch\n'), ((38290, 38321), 'soundfile.read', 'sf.read', (['fname'], {'dtype': '"""float32"""'}), "(fname, dtype='float32')\n", (38297, 38321), True, 'import soundfile as sf\n'), ((38439, 38469), 'librosa.resample', 'librosa.resample', (['y', 'sr', '(22050)'], {}), '(y, sr, 22050)\n', (38455, 38469), False, 'import librosa\n'), ((39559, 39586), 'torch.from_numpy', 'torch.from_numpy', (['embedding'], {}), '(embedding)\n', (39575, 39586), False, 'import torch\n'), ((39796, 39820), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (39812, 39820), False, 'import torch\n'), ((40169, 40218), 'target_sound_detection.src.utils.median_filter', 'median_filter', (['pred'], {'window_size': '(1)', 'threshold': '(0.5)'}), '(pred, window_size=1, threshold=0.5)\n', (40182, 40218), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((45032, 45049), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (45047, 45049), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((45135, 45379), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n    'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n    False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n    True, device=self.device)\n", (45149, 45379), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((45613, 45640), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (45627, 45640), False, 'import soundfile\n'), ((47031, 47048), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (47046, 47048), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((47134, 47378), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n    'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n    False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n    True, device=self.device)\n", (47148, 47378), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((47597, 47624), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (47611, 47624), False, 'import soundfile\n'), ((48487, 48508), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (48493, 48508), False, 'from langchain.llms.openai import OpenAI\n'), ((49380, 49452), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), 
"(memory_key='chat_history', output_key='output')\n", (49404, 49452), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((75007, 75049), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75022, 75049), True, 'import gradio as gr\n'), ((75098, 75125), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (75113, 75125), True, 'import gradio as gr\n'), ((75168, 75210), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75183, 75210), True, 'import gradio as gr\n'), ((75253, 75295), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75268, 75295), True, 'import gradio as gr\n'), ((75339, 75370), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (75355, 75370), True, 'import gradio as gr\n'), ((75437, 75493), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (75446, 75493), True, 'import gradio as gr\n'), ((75583, 75645), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""AudioGPT"""', 'visible': '(False)'}), "(elem_id='chatbot', label='AudioGPT', visible=False)\n", (75593, 75645), True, 'import gradio as gr\n'), ((75663, 75675), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (75671, 75675), True, 'import gradio as gr\n'), ((130, 156), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'import os\n'), ((205, 231), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((293, 319), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'import os\n'), ((399, 425), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'import os\n'), ((493, 519), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (509, 519), False, 'import os\n'), ((9915, 9930), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9928, 9930), False, 'import torch\n'), ((13139, 13154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13152, 13154), False, 'import torch\n'), ((18985, 19005), 'numpy.load', 'np.load', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18992, 19005), True, 'import numpy as np\n'), ((20568, 20593), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20591, 20593), False, 'import torch\n'), ((20544, 20564), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20556, 20564), False, 'import torch\n'), ((20599, 20618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (20611, 20618), False, 'import torch\n'), ((21770, 21796), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (21785, 21796), False, 'import librosa\n'), ((22054, 22113), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22060, 22113), True, 'import numpy as np\n'), ((22534, 22560), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (22549, 22560), False, 'import librosa\n'), ((22818, 22877), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), 
'(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22824, 22877), True, 'import numpy as np\n'), ((25442, 25457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25455, 25457), False, 'import torch\n'), ((27988, 28016), 'os.path.basename', 'os.path.basename', (['audio_path'], {}), '(audio_path)\n', (28004, 28016), False, 'import os\n'), ((29774, 29789), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29787, 29789), False, 'import torch\n'), ((31019, 31067), 'numpy.arange', 'np.arange', (['(0)', 'frames_num', 'self.frames_per_second'], {}), '(0, frames_num, self.frames_per_second)\n', (31028, 31067), True, 'import numpy as np\n'), ((31105, 31154), 'numpy.arange', 'np.arange', (['(0)', '(frames_num / self.frames_per_second)'], {}), '(0, frames_num / self.frames_per_second)\n', (31114, 31154), True, 'import numpy as np\n'), ((31187, 31206), 'numpy.arange', 'np.arange', (['(0)', 'top_k'], {}), '(0, top_k)\n', (31196, 31206), True, 'import numpy as np\n'), ((40409, 40472), 'target_sound_detection.src.utils.decode_with_timestamps', 'decode_with_timestamps', (['target_event', 'filtered_pred[index_k, :]'], {}), '(target_event, filtered_pred[index_k, :])\n', (40431, 40472), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((47818, 47879), 'soundfile.write', 'soundfile.write', (['audio_filename', 'enh_speech[0]'], {'samplerate': 'sr'}), '(audio_filename, enh_speech[0], samplerate=sr)\n', (47833, 47879), False, 'import soundfile\n'), ((57651, 57951), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (57667, 57951), False, 'from langchain.agents.initialize import initialize_agent\n'), ((62445, 62745), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (62461, 62745), False, 'from langchain.agents.initialize import initialize_agent\n'), ((67323, 67348), 'soundfile.read', 'soundfile.read', (['file.name'], {}), '(file.name)\n', (67337, 67348), False, 'import soundfile\n'), ((67361, 67419), 'soundfile.write', 'soundfile.write', (['audio_filename', 'audio_load'], {'samplerate': 'sr'}), '(audio_filename, audio_load, samplerate=sr)\n', (67376, 67419), False, 'import soundfile\n'), ((68723, 68744), 'PIL.Image.open', 'Image.open', (['file.name'], {}), '(file.name)\n', (68733, 68744), False, 'from PIL import 
Image\n'), ((74811, 74841), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74826, 74841), True, 'import gradio as gr\n'), ((74843, 74898), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'new_audio_filename', 'visible': '(True)'}), '(value=new_audio_filename, visible=True)\n', (74858, 74898), True, 'import gradio as gr\n'), ((74900, 74930), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74915, 74930), True, 'import gradio as gr\n'), ((74932, 74963), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74948, 74963), True, 'import gradio as gr\n'), ((75516, 75524), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75522, 75524), True, 'import gradio as gr\n'), ((75538, 75564), 'gradio.Markdown', 'gr.Markdown', (['"""## AudioGPT"""'], {}), "('## AudioGPT')\n", (75549, 75564), True, 'import gradio as gr\n'), ((75690, 75698), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75696, 75698), True, 'import gradio as gr\n'), ((75985, 76006), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (75991, 76006), True, 'import gradio as gr\n'), ((76544, 76552), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76550, 76552), True, 'import gradio as gr\n'), ((76577, 76600), 'gradio.Audio', 'gr.Audio', ([], {'visible': '(False)'}), '(visible=False)\n', (76585, 76600), True, 'import gradio as gr\n'), ((76614, 76622), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76620, 76622), True, 'import gradio as gr\n'), ((76740, 76748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76746, 76748), True, 'import gradio as gr\n'), ((76773, 76828), 'gradio.Image', 'gr.Image', ([], {'type': '"""filepath"""', 'tool': '"""sketch"""', 'visible': '(False)'}), "(type='filepath', tool='sketch', visible=False)\n", (76781, 76828), True, 'import gradio as gr\n'), ((76840, 76848), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76846, 76848), True, 'import gradio as gr\n'), ((76875, 76923), 'gradio.Button', 'gr.Button', (['"""Predict Masked Place"""'], {'visible': '(False)'}), "('Predict Masked Place', visible=False)\n", (76884, 76923), True, 'import gradio as gr\n'), ((76945, 76966), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (76951, 76966), True, 'import gradio as gr\n'), ((5967, 6057), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n    'Salesforce/blip-image-captioning-base')\n", (6011, 6057), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6968, 7004), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (6978, 7004), False, 'import torch\n'), ((7534, 7562), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (7550, 7562), False, 'import torch\n'), ((9137, 9162), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9160, 9162), False, 'import torch\n'), ((9661, 9681), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (9669, 9681), True, 'import numpy as np\n'), ((10959, 10995), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (10969, 10995), False, 'import torch\n'), ((11555, 11583), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (11571, 11583), False, 'import torch\n'), ((13731, 13756), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13754, 13756), False, 'import torch\n'), ((14598, 14623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14621, 14623), False, 'import torch\n'), ((16575, 16600), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16598, 16600), False, 'import torch\n'), ((18374, 18399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18397, 18399), False, 'import torch\n'), ((20461, 20497), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (20471, 20497), False, 'import torch\n'), ((23657, 23685), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (23673, 23685), False, 'import torch\n'), ((26711, 26745), 'whisper.log_mel_spectrogram', 'whisper.log_mel_spectrogram', (['audio'], {}), '(audio)\n', (26738, 26745), False, 'import whisper\n'), ((27767, 27792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27790, 27792), False, 'import torch\n'), ((30235, 30267), 'numpy.max', 'np.max', (['framewise_output'], {'axis': '(0)'}), '(framewise_output, axis=0)\n', (30241, 30267), True, 'import numpy as np\n'), ((30742, 30754), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (30748, 30754), True, 'import numpy as np\n'), ((31244, 31265), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (31252, 31265), True, 'import numpy as np\n'), ((32456, 32478), 'torch.tensor', 'torch.tensor', (['waveform'], {}), '(waveform)\n', (32468, 32478), False, 'import torch\n'), ((34703, 34725), 'random.randint', 'random.randint', (['(0)', 'm_a'], {}), '(0, m_a)\n', (34717, 34725), False, 'import random\n'), ((35521, 35536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35534, 35536), False, 'import torch\n'), ((35938, 35974), 'torch.clamp', 'torch.clamp', (['binaural'], {'min': '(-1)', 'max': '(1)'}), '(binaural, min=-1, max=1)\n', (35949, 35974), False, 'import torch\n'), ((38646, 38665), 'clip.tokenize', 'clip.tokenize', (['text'], {}), '(text)\n', (38659, 38665), False, 'import clip\n'), ((49580, 49968), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n    description=\n    'useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n    )\n", (49584, 49968), False, 'from langchain.agents.tools import Tool\n'), ((50029, 50275), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n    'useful for when you want to know what is inside the photo. 
receives image_path as input. The input to this tool should be a string, representing the image_path. '\n    )\n", (50033, 50275), False, 'from langchain.agents.tools import Tool\n'), ((50340, 50626), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n    description=\n    'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n    )\n", (50344, 50626), False, 'from langchain.agents.tools import Tool\n'), ((50687, 51161), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n    'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n    )\n", (50691, 51161), False, 'from langchain.agents.tools import Tool\n'), ((51281, 52061), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n    \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n    func=self.t2s.inference, description=\n    \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n    )\n', (51285, 52061), False, 'from langchain.agents.tools import Tool\n'), ((52228, 52530), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n    inference, description=\n    'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n    )\n", (52232, 52530), False, 'from langchain.agents.tools import Tool\n'), ((53100, 53426), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Enhancement In Single-Channel"""', 'func': 'self.SE_SS_SC.inference', 'description': '"""useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Enhancement In Single-Channel', func=self.SE_SS_SC.\n    inference, description=\n    'useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (53104, 53426), False, 'from langchain.agents.tools import Tool\n'), ((53486, 53762), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Separation In Single-Channel"""', 'func': 'self.SS.inference', 'description': '"""useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Separation In Single-Channel', func=self.SS.inference,\n    description=\n    'useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (53490, 53762), False, 'from langchain.agents.tools import Tool\n'), ((54246, 54479), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From The Image"""', 'func': 'self.i2a.inference', 'description': '"""useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. """'}), "(name='Generate Audio From The Image', func=self.i2a.inference,\n    description=\n    'useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. 
'\n    )\n", (54250, 54479), False, 'from langchain.agents.tools import Tool\n'), ((54541, 54792), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n    description=\n    'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (54545, 54792), False, 'from langchain.agents.tools import Tool\n'), ((54854, 55191), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Audio Inpainting"""', 'func': 'self.inpaint.show_mel_fn', 'description': '"""useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path."""'}), "(name='Audio Inpainting', func=self.inpaint.show_mel_fn, description=\n    'useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path.'\n    )\n", (54858, 55191), False, 'from langchain.agents.tools import Tool\n'), ((55257, 55513), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Transcribe Speech"""', 'func': 'self.asr.inference', 'description': '"""useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Transcribe Speech', func=self.asr.inference, description=\n    'useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (55261, 55513), False, 'from langchain.agents.tools import Tool\n'), ((55578, 55869), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n    func=self.geneface.inference, description=\n    'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n    )\n", (55582, 55869), False, 'from langchain.agents.tools import Tool\n'), ((55930, 56296), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Detect The Sound Event From The Audio"""', 'func': 'self.detection.inference', 'description': '"""useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Detect The Sound Event From The Audio', func=self.detection.\n    inference, description=\n    'useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (55934, 56296), False, 'from langchain.agents.tools import Tool\n'), ((56356, 56654), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sythesize Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. """'}), "(name='Sythesize Binaural Audio From A Mono Audio Input', func=self.\n    binaural.inference, description=\n    'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (56360, 56654), False, 'from langchain.agents.tools import Tool\n'), ((56714, 57120), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n    'Extract Sound Event From Mixture Audio Based On Language Description',\n    func=self.extraction.inference, description=\n    'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n    )\n", (56718, 57120), False, 'from langchain.agents.tools import Tool\n'), ((57176, 57569), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n    'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n    )\n", (57180, 57569), False, 'from langchain.agents.tools import Tool\n'), ((58070, 58093), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58079, 58093), True, 'import gradio as gr\n'), ((58095, 58119), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58104, 58119), True, 'import gradio as gr\n'), ((58121, 58144), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58130, 58144), True, 'import gradio as gr\n'), ((58146, 58170), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58155, 58170), True, 'import gradio as gr\n'), ((58228, 58514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n    description=\n    'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n    )\n", (58232, 58514), False, 'from langchain.agents.tools import Tool\n'), ((58575, 59049), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n    'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n    )\n", (58579, 59049), False, 'from langchain.agents.tools import Tool\n'), ((59169, 59949), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n    \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n    func=self.t2s.inference, description=\n    \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n    )\n', (59173, 59949), False, 'from langchain.agents.tools import Tool\n'), ((60116, 60418), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n    inference, description=\n    'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n    )\n", (60120, 60418), False, 'from langchain.agents.tools import Tool\n'), ((60478, 60729), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n    description=\n    'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (60482, 60729), False, 'from langchain.agents.tools import Tool\n'), ((60791, 61082), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n    func=self.geneface.inference, description=\n    'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n    )\n", (60795, 61082), False, 'from langchain.agents.tools import Tool\n'), ((61143, 61440), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. 
The input to this tool should be a string, representing the audio_path. """'}), "(name='Generate Binaural Audio From A Mono Audio Input', func=self.\n    binaural.inference, description=\n    'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (61147, 61440), False, 'from langchain.agents.tools import Tool\n'), ((61500, 61906), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n    'Extract Sound Event From Mixture Audio Based On Language Description',\n    func=self.extraction.inference, description=\n    'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n    )\n", (61504, 61906), False, 'from langchain.agents.tools import Tool\n'), ((61962, 62355), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n    'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n    )\n", (61966, 62355), False, 'from langchain.agents.tools import Tool\n'), ((62864, 62888), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62873, 62888), True, 'import gradio as gr\n'), ((62890, 62914), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62899, 62914), True, 'import gradio as gr\n'), ((62916, 62940), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62925, 62940), True, 'import gradio as gr\n'), ((62942, 62965), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (62951, 62965), True, 'import gradio as gr\n'), ((63585, 63615), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63600, 63615), True, 'import gradio as gr\n'), ((63617, 63647), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63632, 63647), True, 'import gradio as gr\n'), ((63649, 63679), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63664, 63679), True, 'import gradio as gr\n'), ((63681, 63712), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63697, 63712), True, 'import gradio as gr\n'), ((66706, 66757), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (66721, 66757), True, 'import gradio as gr\n'), ((66758, 66788), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66773, 66788), True, 'import gradio as gr\n'), ((66790, 66820), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66805, 66820), True, 'import gradio as gr\n'), ((66822, 66853), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66838, 66853), True, 'import gradio as gr\n'), ((68295, 68346), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (68310, 68346), True, 'import gradio as gr\n'), ((68347, 68377), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (68362, 68377), True, 'import gradio as gr\n'), ((69917, 69947), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69932, 69947), True, 'import gradio as gr\n'), ((69949, 69979), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69964, 69979), True, 'import gradio as gr\n'), ((70735, 70762), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (70750, 70762), True, 'import gradio as gr\n'), ((70764, 70822), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (70779, 70822), True, 'import gradio as gr\n'), ((70830, 70860), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (70845, 70860), True, 'import gradio as gr\n'), ((73858, 73885), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73873, 73885), True, 'import gradio as gr\n'), ((73887, 73945), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73902, 73945), True, 'import gradio as gr\n'), 
((73953, 73983), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (73968, 73983), True, 'import gradio as gr\n'), ((75732, 75752), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (75741, 75752), True, 'import gradio as gr\n'), ((75789, 75865), 'gradio.Radio', 'gr.Radio', ([], {'choices': "['text', 'speech']", 'value': '"""text"""', 'label': '"""Interaction Type"""'}), "(choices=['text', 'speech'], value='text', label='Interaction Type')\n", (75797, 75865), True, 'import gradio as gr\n'), ((75883, 75916), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (75892, 75916), True, 'import gradio as gr\n'), ((75943, 75962), 'gradio.Button', 'gr.Button', (['"""Select"""'], {}), "('Select')\n", (75952, 75962), True, 'import gradio as gr\n'), ((76044, 76064), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (76053, 76064), True, 'import gradio as gr\n'), ((76219, 76252), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76228, 76252), True, 'import gradio as gr\n'), ((76276, 76301), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Run"""'], {}), "('🏃\\u200d♂️Run')\n", (76285, 76301), True, 'import gradio as gr\n'), ((76314, 76347), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76323, 76347), True, 'import gradio as gr\n'), ((76377, 76397), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (76386, 76397), True, 'import gradio as gr\n'), ((76415, 76448), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76424, 76448), True, 'import gradio as gr\n'), ((76472, 76530), 'gradio.UploadButton', 'gr.UploadButton', (['"""🖼️Upload"""'], {'file_types': "['image', 'audio']"}), "('🖼️Upload', file_types=['image', 'audio'])\n", (76487, 76530), True, 'import gradio as gr\n'), ((76641, 76674), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (76650, 76674), True, 'import gradio as gr\n'), ((76703, 76726), 'gradio.Video', 'gr.Video', ([], {'visible': '(False)'}), '(visible=False)\n', (76711, 76726), True, 'import gradio as gr\n'), ((77007, 77027), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (77016, 77027), True, 'import gradio as gr\n'), ((77060, 77121), 'gradio.Audio', 'gr.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""', 'label': '"""Input"""'}), "(source='microphone', type='filepath', label='Input')\n", (77068, 77121), True, 'import gradio as gr\n'), ((77139, 77173), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77148, 77173), True, 'import gradio as gr\n'), ((77204, 77232), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Submit"""'], {}), "('🏃\\u200d♂️Submit')\n", (77213, 77232), True, 'import gradio as gr\n'), ((77245, 77279), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77254, 77279), True, 'import gradio as gr\n'), ((77312, 77332), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (77321, 77332), True, 'import gradio as gr\n'), ((77350, 77358), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (77356, 77358), True, 'import gradio as gr\n'), ((77392, 77431), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output"""', 'visible': 
'(False)'}), "(label='Output', visible=False)\n", (77400, 77431), True, 'import gradio as gr\n'), ((4265, 4277), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4275, 4277), False, 'import uuid\n'), ((6139, 6161), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6149, 6161), False, 'from PIL import Image\n'), ((20850, 20871), 'torch.from_numpy', 'torch.from_numpy', (['mel'], {}), '(mel)\n', (20866, 20871), False, 'import torch\n'), ((20926, 20948), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (20942, 20948), False, 'import torch\n'), ((31997, 32012), 'sound_extraction.model.LASSNet.LASSNet', 'LASSNet', (['device'], {}), '(device)\n', (32004, 32012), False, 'from sound_extraction.model.LASSNet import LASSNet\n'), ((38499, 38549), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {}), '(y, **self.MEL_ARGS)\n', (38529, 38549), False, 'import librosa\n'), ((64244, 64274), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64259, 64274), True, 'import gradio as gr\n'), ((64276, 64306), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64291, 64306), True, 'import gradio as gr\n'), ((64308, 64338), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64323, 64338), True, 'import gradio as gr\n'), ((64340, 64371), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64356, 64371), True, 'import gradio as gr\n'), ((71452, 71479), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71467, 71479), True, 'import gradio as gr\n'), ((71481, 71539), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71496, 71539), True, 'import gradio as gr\n'), ((71547, 71577), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (71562, 71577), True, 'import gradio as gr\n'), ((5271, 5283), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5281, 5283), False, 'import uuid\n'), ((9377, 9399), 'torch.FloatTensor', 'torch.FloatTensor', (['wav'], {}), '(wav)\n', (9394, 9399), False, 'import torch\n'), ((10120, 10132), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10130, 10132), False, 'import uuid\n'), ((13340, 13352), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13350, 13352), False, 'import uuid\n'), ((14308, 14320), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14318, 14320), False, 'import uuid\n'), ((16178, 16190), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16188, 16190), False, 'import uuid\n'), ((18075, 18087), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18085, 18087), False, 'import uuid\n'), ((19487, 19499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19497, 19499), False, 'import uuid\n'), ((23290, 23302), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23300, 23302), False, 'import uuid\n'), ((26117, 26129), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26127, 26129), False, 'import uuid\n'), ((26232, 26244), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26242, 26244), False, 'import uuid\n'), ((31541, 31553), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (31551, 31553), False, 'import uuid\n'), ((33070, 33082), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (33080, 33082), False, 'import uuid\n'), ((34346, 34386), 'numpy.loadtxt', 'np.loadtxt', (['self.position_file[rand_int]'], {}), 
'(self.position_file[rand_int])\n', (34356, 34386), True, 'import numpy as np\n'), ((36088, 36100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36098, 36100), False, 'import uuid\n'), ((45879, 45891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45889, 45891), False, 'import uuid\n'), ((47744, 47756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47754, 47756), False, 'import uuid\n'), ((64588, 64618), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64603, 64618), True, 'import gradio as gr\n'), ((64620, 64650), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64635, 64650), True, 'import gradio as gr\n'), ((64652, 64682), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64667, 64682), True, 'import gradio as gr\n'), ((64684, 64715), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64700, 64715), True, 'import gradio as gr\n'), ((70081, 70093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (70091, 70093), False, 'import uuid\n'), ((71927, 71954), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71942, 71954), True, 'import gradio as gr\n'), ((71956, 72014), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71971, 72014), True, 'import gradio as gr\n'), ((72022, 72052), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72037, 72052), True, 'import gradio as gr\n'), ((76088, 76183), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n    'Enter text and press enter, or upload an image')\n", (76098, 76183), True, 'import gradio as gr\n'), ((47987, 47999), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47997, 47999), False, 'import uuid\n'), ((48159, 48171), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48169, 48171), False, 'import uuid\n'), ((65068, 65098), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65083, 65098), True, 'import gradio as gr\n'), ((65100, 65130), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65115, 65130), True, 'import gradio as gr\n'), ((65132, 65162), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65147, 65162), True, 'import gradio as gr\n'), ((65164, 65195), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65180, 65195), True, 'import gradio as gr\n'), ((67208, 67220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (67218, 67220), False, 'import uuid\n'), ((68627, 68639), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (68637, 68639), False, 'import uuid\n'), ((72538, 72565), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (72553, 72565), True, 'import gradio as gr\n'), ((72567, 72625), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (72582, 72625), True, 'import gradio as gr\n'), ((72633, 72663), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72648, 72663), True, 'import gradio as gr\n'), ((65632, 65683), 
'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (65647, 65683), True, 'import gradio as gr\n'), ((65684, 65714), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65699, 65714), True, 'import gradio as gr\n'), ((65716, 65767), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'image_filename', 'visible': '(True)'}), '(value=image_filename, visible=True)\n', (65731, 65767), True, 'import gradio as gr\n'), ((65768, 65798), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(True)'}), '(visible=True)\n', (65784, 65798), True, 'import gradio as gr\n'), ((73124, 73151), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73139, 73151), True, 'import gradio as gr\n'), ((73153, 73211), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73168, 73211), True, 'import gradio as gr\n'), ((73219, 73270), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (73234, 73270), True, 'import gradio as gr\n'), ((66196, 66226), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66211, 66226), True, 'import gradio as gr\n'), ((66228, 66279), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (66243, 66279), True, 'import gradio as gr\n'), ((66280, 66310), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66295, 66310), True, 'import gradio as gr\n'), ((66312, 66343), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66328, 66343), True, 'import gradio as gr\n')] | 
| 
	from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
__version__ = "0.0.20"
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
    template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
    integration_namespace.integration_cli,
    name="integration",
    help=integration_namespace.__doc__,
)
def version_callback(show_version: bool) -> None:
    if show_version:
        typer.echo(f"langchain-cli {__version__}")
        raise typer.Exit()
@app.callback()
def main(
    version: bool = typer.Option(
        False,
        "--version",
        "-v",
        help="Print the current CLI version.",
        callback=version_callback,
        is_eager=True,
    ),
):
    pass
@app.command()
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
    ] = None,
) -> None:
    """
    Start the LangServe app, whether it's a template or an app.
    """
    # see if is a template
    try:
        project_dir = get_package_root()
        pyproject = project_dir / "pyproject.toml"
        get_langserve_export(pyproject)
    except KeyError:
        # not a template
        app_namespace.serve(port=port, host=host)
    else:
        # is a template
        template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
    app()
 | 
	[
  "langchain_cli.namespaces.template.serve",
  "langchain_cli.utils.packages.get_package_root",
  "langchain_cli.namespaces.app.serve",
  "langchain_cli.utils.packages.get_langserve_export"
] | 
	[((394, 449), 'typer.Typer', 'typer.Typer', ([], {'no_args_is_help': '(True)', 'add_completion': '(False)'}), '(no_args_is_help=True, add_completion=False)\n', (405, 449), False, 'import typer\n'), ((952, 1076), 'typer.Option', 'typer.Option', (['(False)', '"""--version"""', '"""-v"""'], {'help': '"""Print the current CLI version."""', 'callback': 'version_callback', 'is_eager': '(True)'}), "(False, '--version', '-v', help=\n    'Print the current CLI version.', callback=version_callback, is_eager=True)\n", (964, 1076), False, 'import typer\n'), ((834, 876), 'typer.echo', 'typer.echo', (['f"""langchain-cli {__version__}"""'], {}), "(f'langchain-cli {__version__}')\n", (844, 876), False, 'import typer\n'), ((891, 903), 'typer.Exit', 'typer.Exit', ([], {}), '()\n', (901, 903), False, 'import typer\n'), ((1543, 1561), 'langchain_cli.utils.packages.get_package_root', 'get_package_root', ([], {}), '()\n', (1559, 1561), False, 'from langchain_cli.utils.packages import get_langserve_export, get_package_root\n'), ((1621, 1652), 'langchain_cli.utils.packages.get_langserve_export', 'get_langserve_export', (['pyproject'], {}), '(pyproject)\n', (1641, 1652), False, 'from langchain_cli.utils.packages import get_langserve_export, get_package_root\n'), ((1791, 1837), 'langchain_cli.namespaces.template.serve', 'template_namespace.serve', ([], {'port': 'port', 'host': 'host'}), '(port=port, host=host)\n', (1815, 1837), True, 'from langchain_cli.namespaces import template as template_namespace\n'), ((1707, 1748), 'langchain_cli.namespaces.app.serve', 'app_namespace.serve', ([], {'port': 'port', 'host': 'host'}), '(port=port, host=host)\n', (1726, 1748), True, 'from langchain_cli.namespaces import app as app_namespace\n'), ((1219, 1269), 'typer.Option', 'typer.Option', ([], {'help': '"""The port to run the server on"""'}), "(help='The port to run the server on')\n", (1231, 1269), False, 'import typer\n'), ((1328, 1378), 'typer.Option', 'typer.Option', ([], {'help': '"""The host to run the server on"""'}), "(help='The host to run the server on')\n", (1340, 1378), False, 'import typer\n')] | 
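The row above is the `langchain-cli` entry point built on Typer. As a quick orientation aid (not part of the dataset row; it assumes the `app` object defined above is importable), a minimal sketch using Typer's standard test runner to exercise the `--version` flag:

from typer.testing import CliRunner

runner = CliRunner()
# The eager --version callback prints the CLI version and raises typer.Exit().
result = runner.invoke(app, ["--version"])
assert result.exit_code == 0
assert "langchain-cli" in result.stdout
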
| 
	import os
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter
from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL
def ingest_documents():
    """
    Ingest PDF to Redis from the data/ directory that
    contains Edgar 10k filings data for Nike.
    """
    # Load list of pdfs
    company_name = "Nike"
    data_path = "data/"
    doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0]
    print("Parsing 10k filing doc for NIKE", doc)  # noqa: T201
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500, chunk_overlap=100, add_start_index=True
    )
    loader = UnstructuredFileLoader(doc, mode="single", strategy="fast")
    chunks = loader.load_and_split(text_splitter)
    print("Done preprocessing. Created", len(chunks), "chunks of the original pdf")  # noqa: T201
    # Create vectorstore
    embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
    _ = Redis.from_texts(
        # appending this little bit can sometimes help with semantic retrieval
        # especially with multiple companies
        texts=[f"Company: {company_name}. " + chunk.page_content for chunk in chunks],
        metadatas=[chunk.metadata for chunk in chunks],
        embedding=embedder,
        index_name=INDEX_NAME,
        index_schema=INDEX_SCHEMA,
        redis_url=REDIS_URL,
    )
if __name__ == "__main__":
    ingest_documents()
 | 
	[
  "langchain_community.vectorstores.Redis.from_texts",
  "langchain_community.document_loaders.UnstructuredFileLoader",
  "langchain_community.embeddings.HuggingFaceEmbeddings",
  "langchain_text_splitters.RecursiveCharacterTextSplitter"
] | 
	[((726, 818), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(100)', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=100,\n    add_start_index=True)\n', (756, 818), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((842, 901), 'langchain_community.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['doc'], {'mode': '"""single"""', 'strategy': '"""fast"""'}), "(doc, mode='single', strategy='fast')\n", (864, 901), False, 'from langchain_community.document_loaders import UnstructuredFileLoader\n'), ((1091, 1136), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'EMBED_MODEL'}), '(model_name=EMBED_MODEL)\n', (1112, 1136), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((1146, 1394), 'langchain_community.vectorstores.Redis.from_texts', 'Redis.from_texts', ([], {'texts': "[(f'Company: {company_name}. ' + chunk.page_content) for chunk in chunks]", 'metadatas': '[chunk.metadata for chunk in chunks]', 'embedding': 'embedder', 'index_name': 'INDEX_NAME', 'index_schema': 'INDEX_SCHEMA', 'redis_url': 'REDIS_URL'}), "(texts=[(f'Company: {company_name}. ' + chunk.page_content) for\n    chunk in chunks], metadatas=[chunk.metadata for chunk in chunks],\n    embedding=embedder, index_name=INDEX_NAME, index_schema=INDEX_SCHEMA,\n    redis_url=REDIS_URL)\n", (1162, 1394), False, 'from langchain_community.vectorstores import Redis\n'), ((572, 601), 'os.path.join', 'os.path.join', (['data_path', 'file'], {}), '(data_path, file)\n', (584, 601), False, 'import os\n'), ((614, 635), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (624, 635), False, 'import os\n')] | 
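The ingest script above only writes to Redis and discards the store returned by `Redis.from_texts`. A hedged sketch of the matching query side (modelled on how this template is usually consumed; the keyword names of `from_existing_index` can differ between langchain_community versions):

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL

embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
# Reconnect to the index created by ingest_documents() and run a quick retrieval check.
vectorstore = Redis.from_existing_index(
    embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL
)
docs = vectorstore.similarity_search("Nike revenue growth", k=3)  # illustrative query
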
| 
	from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
index = index_creator.from_documents(docs)
index.vectorstore.save_local("titanic_data")
 | 
	[
  "langchain_community.document_loaders.CSVLoader",
  "langchain.indexes.VectorstoreIndexCreator"
] | 
	[((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {'vectorstore_cls': 'FAISS'}), '(vectorstore_cls=FAISS)\n', (291, 314), False, 'from langchain.indexes import VectorstoreIndexCreator\n')] | 
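The snippet above persists the FAISS index to the local `titanic_data` directory. A small sketch of loading it back in a later session (not from the source; newer langchain_community releases also require `allow_dangerous_deserialization=True` because the index is unpickled on load):

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

# The embedding model must match the one used at index time;
# VectorstoreIndexCreator defaults to OpenAIEmbeddings.
vectorstore = FAISS.load_local("titanic_data", OpenAIEmbeddings())
print(vectorstore.similarity_search("first class passengers", k=2))
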
| 
	from langchain_core.prompts.prompt import PromptTemplate
# There are a few different templates to choose from
# These are just different ways to generate hypothetical documents
web_search_template = """Please write a passage to answer the question 
Question: {question}
Passage:"""
sci_fact_template = """Please write a scientific paper passage to support/refute the claim 
Claim: {question}
Passage:"""  # noqa: E501
fiqa_template = """Please write a financial article passage to answer the question
Question: {question}
Passage:"""
trec_news_template = """Please write a news passage about the topic.
Topic: {question}
Passage:"""
# For the sake of this example we will use the web search template
hyde_prompt = PromptTemplate.from_template(web_search_template)
 | 
	[
  "langchain_core.prompts.prompt.PromptTemplate.from_template"
] | 
	[((716, 765), 'langchain_core.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['web_search_template'], {}), '(web_search_template)\n', (744, 765), False, 'from langchain_core.prompts.prompt import PromptTemplate\n')] | 
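Any of the four templates above can be wrapped in exactly the same way; the row only instantiates the web-search variant. A tiny illustration (the claim text is made up) of swapping in the SciFact template and rendering it, assuming the definitions above are in scope:

from langchain_core.prompts.prompt import PromptTemplate

sci_fact_prompt = PromptTemplate.from_template(sci_fact_template)
print(sci_fact_prompt.format(question="Vitamin C shortens the common cold."))
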
| 
	"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
if TYPE_CHECKING:
    from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
    try:
        from langchainhub import Client
    except ImportError as e:
        raise ImportError(
            "Could not import langchainhub, please install with `pip install "
            "langchainhub`."
        ) from e
    # Client logic will also attempt to load URL/key from environment variables
    return Client(api_url, api_key=api_key)
def push(
    repo_full_name: str,
    object: Any,
    *,
    api_url: Optional[str] = None,
    api_key: Optional[str] = None,
    parent_commit_hash: Optional[str] = "latest",
    new_repo_is_public: bool = True,
    new_repo_description: str = "",
) -> str:
    """
    Pushes an object to the hub and returns the URL it can be viewed at in a browser.
    :param repo_full_name: The full name of the repo to push to in the format of
        `owner/repo`.
    :param object: The LangChain to serialize and push to the hub.
    :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
        if you have an api key set, or a localhost instance if not.
    :param api_key: The API key to use to authenticate with the LangChain Hub API.
    :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
        to the latest commit automatically.
    :param new_repo_is_public: Whether the repo should be public. Defaults to
        True (Public by default).
    :param new_repo_description: The description of the repo. Defaults to an empty
        string.
    """
    client = _get_client(api_url=api_url, api_key=api_key)
    manifest_json = dumps(object)
    message = client.push(
        repo_full_name,
        manifest_json,
        parent_commit_hash=parent_commit_hash,
        new_repo_is_public=new_repo_is_public,
        new_repo_description=new_repo_description,
    )
    return message
def pull(
    owner_repo_commit: str,
    *,
    api_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Any:
    """
    Pulls an object from the hub and returns it as a LangChain object.
    :param owner_repo_commit: The full name of the repo to pull from in the format of
        `owner/repo:commit_hash`.
    :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
        if you have an api key set, or a localhost instance if not.
    :param api_key: The API key to use to authenticate with the LangChain Hub API.
    """
    client = _get_client(api_url=api_url, api_key=api_key)
    if hasattr(client, "pull_repo"):
        # >= 0.1.15
        res_dict = client.pull_repo(owner_repo_commit)
        obj = loads(json.dumps(res_dict["manifest"]))
        if isinstance(obj, BasePromptTemplate):
            if obj.metadata is None:
                obj.metadata = {}
            obj.metadata["lc_hub_owner"] = res_dict["owner"]
            obj.metadata["lc_hub_repo"] = res_dict["repo"]
            obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"]
        return obj
    # Then it's < 0.1.15
    resp: str = client.pull(owner_repo_commit)
    return loads(resp)
 | 
	[
  "langchain_core.load.load.loads",
  "langchain_core.load.dump.dumps",
  "langchainhub.Client"
] | 
	[((746, 778), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (752, 778), False, 'from langchainhub import Client\n'), ((1979, 1992), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1984, 1992), False, 'from langchain_core.load.dump import dumps\n'), ((3453, 3464), 'langchain_core.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (3458, 3464), False, 'from langchain_core.load.load import loads\n'), ((3004, 3036), 'json.dumps', 'json.dumps', (["res_dict['manifest']"], {}), "(res_dict['manifest'])\n", (3014, 3036), False, 'import json\n')] | 
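For orientation, a hedged sketch of how the `push`/`pull` pair defined above is normally called (it assumes the module ships as `langchain.hub`, that a LangChain Hub API key is configured, and the repo handle is made up):

from langchain import hub
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Summarize the following text:\n\n{text}")
# push returns a browsable URL; pull round-trips the serialized object.
url = hub.push("my-handle/summarize-prompt", prompt)
pulled = hub.pull("my-handle/summarize-prompt")
assert isinstance(pulled, ChatPromptTemplate)
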
| 
	from pathlib import Path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter
txt_path = Path(__file__).parent / "dune.txt"
graph = Neo4jGraph()
# Load the text file
loader = TextLoader(str(txt_path))
documents = loader.load()
# Define chunking strategy
parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24)
child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24)
# Store parent-child patterns into graph
parent_documents = parent_splitter.split_documents(documents)
for parent in parent_documents:
    child_documents = child_splitter.split_documents([parent])
    params = {
        "parent": parent.page_content,
        "children": [c.page_content for c in child_documents],
    }
    graph.query(
        """
    CREATE (p:Parent {text: $parent})
    WITH p 
    UNWIND $children AS child
    CREATE (c:Child {text: child})
    CREATE (c)-[:HAS_PARENT]->(p)
    """,
        params,
    )
# Calculate embedding values on the child nodes
Neo4jVector.from_existing_graph(
    OpenAIEmbeddings(),
    index_name="retrieval",
    node_label="Child",
    text_node_properties=["text"],
    embedding_node_property="embedding",
)
 | 
	[
  "langchain_community.embeddings.openai.OpenAIEmbeddings",
  "langchain_community.graphs.Neo4jGraph",
  "langchain_text_splitters.TokenTextSplitter"
] | 
	[((371, 383), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (381, 383), False, 'from langchain_community.graphs import Neo4jGraph\n'), ((513, 564), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(24)'}), '(chunk_size=512, chunk_overlap=24)\n', (530, 564), False, 'from langchain_text_splitters import TokenTextSplitter\n'), ((582, 633), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(100)', 'chunk_overlap': '(24)'}), '(chunk_size=100, chunk_overlap=24)\n', (599, 633), False, 'from langchain_text_splitters import TokenTextSplitter\n'), ((1251, 1269), 'langchain_community.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1267, 1269), False, 'from langchain_community.embeddings.openai import OpenAIEmbeddings\n'), ((327, 341), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (331, 341), False, 'from pathlib import Path\n')] | 
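The ingest above stores Parent/Child pairs and embeds only the child nodes. A sketch of the matching retrieval side (the parameter names of `Neo4jVector.from_existing_index` and the `retrieval_query` hook are written from memory and may differ by version; the Cypher walks from a matched child back to its parent so the larger chunk is what gets returned):

from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import Neo4jVector

retrieval_query = """
MATCH (node)-[:HAS_PARENT]->(parent)
WITH parent, max(score) AS score  // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata
"""

vectorstore = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(),
    index_name="retrieval",
    retrieval_query=retrieval_query,
)
retriever = vectorstore.as_retriever()
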
| 
	from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
    """
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
 | 
	[
  "langchain_community.graphs.Neo4jGraph"
] | 
	[((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] | 
| 
	from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
    """
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
    "CREATE FULLTEXT INDEX entity IF NOT EXISTS"
    " FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
 | 
	[
  "langchain_community.graphs.Neo4jGraph"
] | 
	[((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] | 
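The `entity` full-text index created above is what later entity-matching code queries. A short sketch of hitting it through the same `graph` object (standard Neo4j full-text procedure; the search string is illustrative):

matches = graph.query(
    "CALL db.index.fulltext.queryNodes('entity', $search) "
    "YIELD node, score RETURN node.name AS name, labels(node) AS labels, score",
    {"search": "Tom Cruise"},
)
print(matches)
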
| 
	from importlib import metadata
from langchain_core._api import (
    surface_langchain_beta_warnings,
    surface_langchain_deprecation_warnings,
)
try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available.
    __version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
 | 
	[
  "langchain_core._api.surface_langchain_beta_warnings",
  "langchain_core._api.surface_langchain_deprecation_warnings"
] | 
	[((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_beta_warnings', 'surface_langchain_beta_warnings', ([], {}), '()\n', (387, 389), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((173, 202), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (189, 202), False, 'from importlib import metadata\n')] | 
| 
	# ruff: noqa: E402
"""Main entrypoint into package."""
import warnings
from importlib import metadata
from typing import Any, Optional
from langchain_core._api.deprecation import surface_langchain_deprecation_warnings
try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available.
    __version__ = ""
del metadata  # optional, avoids polluting the results of dir(__package__)
def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
    """Warn on import of deprecated module."""
    from langchain.utils.interactive_env import is_interactive_env
    if is_interactive_env():
        # No warnings for interactive environments.
        # This is done to avoid polluting the output of interactive environments
        # where users rely on auto-complete and may trigger this warning
        # even if they are not using any deprecated modules
        return
    if replacement:
        warnings.warn(
            f"Importing {name} from langchain root module is no longer supported. "
            f"Please use {replacement} instead."
        )
    else:
        warnings.warn(
            f"Importing {name} from langchain root module is no longer supported."
        )
# Surfaces Deprecation and Pending Deprecation warnings from langchain.
surface_langchain_deprecation_warnings()
def __getattr__(name: str) -> Any:
    if name == "MRKLChain":
        from langchain.agents import MRKLChain
        _warn_on_import(name, replacement="langchain.agents.MRKLChain")
        return MRKLChain
    elif name == "ReActChain":
        from langchain.agents import ReActChain
        _warn_on_import(name, replacement="langchain.agents.ReActChain")
        return ReActChain
    elif name == "SelfAskWithSearchChain":
        from langchain.agents import SelfAskWithSearchChain
        _warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain")
        return SelfAskWithSearchChain
    elif name == "ConversationChain":
        from langchain.chains import ConversationChain
        _warn_on_import(name, replacement="langchain.chains.ConversationChain")
        return ConversationChain
    elif name == "LLMBashChain":
        raise ImportError(
            "This module has been moved to langchain-experimental. "
            "For more details: "
            "https://github.com/langchain-ai/langchain/discussions/11352."
            "To access this code, install it with `pip install langchain-experimental`."
            "`from langchain_experimental.llm_bash.base "
            "import LLMBashChain`"
        )
    elif name == "LLMChain":
        from langchain.chains import LLMChain
        _warn_on_import(name, replacement="langchain.chains.LLMChain")
        return LLMChain
    elif name == "LLMCheckerChain":
        from langchain.chains import LLMCheckerChain
        _warn_on_import(name, replacement="langchain.chains.LLMCheckerChain")
        return LLMCheckerChain
    elif name == "LLMMathChain":
        from langchain.chains import LLMMathChain
        _warn_on_import(name, replacement="langchain.chains.LLMMathChain")
        return LLMMathChain
    elif name == "QAWithSourcesChain":
        from langchain.chains import QAWithSourcesChain
        _warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain")
        return QAWithSourcesChain
    elif name == "VectorDBQA":
        from langchain.chains import VectorDBQA
        _warn_on_import(name, replacement="langchain.chains.VectorDBQA")
        return VectorDBQA
    elif name == "VectorDBQAWithSourcesChain":
        from langchain.chains import VectorDBQAWithSourcesChain
        _warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain")
        return VectorDBQAWithSourcesChain
    elif name == "InMemoryDocstore":
        from langchain.docstore import InMemoryDocstore
        _warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore")
        return InMemoryDocstore
    elif name == "Wikipedia":
        from langchain.docstore import Wikipedia
        _warn_on_import(name, replacement="langchain.docstore.Wikipedia")
        return Wikipedia
    elif name == "Anthropic":
        from langchain_community.llms import Anthropic
        _warn_on_import(name, replacement="langchain_community.llms.Anthropic")
        return Anthropic
    elif name == "Banana":
        from langchain_community.llms import Banana
        _warn_on_import(name, replacement="langchain_community.llms.Banana")
        return Banana
    elif name == "CerebriumAI":
        from langchain_community.llms import CerebriumAI
        _warn_on_import(name, replacement="langchain_community.llms.CerebriumAI")
        return CerebriumAI
    elif name == "Cohere":
        from langchain_community.llms import Cohere
        _warn_on_import(name, replacement="langchain_community.llms.Cohere")
        return Cohere
    elif name == "ForefrontAI":
        from langchain_community.llms import ForefrontAI
        _warn_on_import(name, replacement="langchain_community.llms.ForefrontAI")
        return ForefrontAI
    elif name == "GooseAI":
        from langchain_community.llms import GooseAI
        _warn_on_import(name, replacement="langchain_community.llms.GooseAI")
        return GooseAI
    elif name == "HuggingFaceHub":
        from langchain_community.llms import HuggingFaceHub
        _warn_on_import(name, replacement="langchain_community.llms.HuggingFaceHub")
        return HuggingFaceHub
    elif name == "HuggingFaceTextGenInference":
        from langchain_community.llms import HuggingFaceTextGenInference
        _warn_on_import(
            name, replacement="langchain_community.llms.HuggingFaceTextGenInference"
        )
        return HuggingFaceTextGenInference
    elif name == "LlamaCpp":
        from langchain_community.llms import LlamaCpp
        _warn_on_import(name, replacement="langchain_community.llms.LlamaCpp")
        return LlamaCpp
    elif name == "Modal":
        from langchain_community.llms import Modal
        _warn_on_import(name, replacement="langchain_community.llms.Modal")
        return Modal
    elif name == "OpenAI":
        from langchain_community.llms import OpenAI
        _warn_on_import(name, replacement="langchain_community.llms.OpenAI")
        return OpenAI
    elif name == "Petals":
        from langchain_community.llms import Petals
        _warn_on_import(name, replacement="langchain_community.llms.Petals")
        return Petals
    elif name == "PipelineAI":
        from langchain_community.llms import PipelineAI
        _warn_on_import(name, replacement="langchain_community.llms.PipelineAI")
        return PipelineAI
    elif name == "SagemakerEndpoint":
        from langchain_community.llms import SagemakerEndpoint
        _warn_on_import(name, replacement="langchain_community.llms.SagemakerEndpoint")
        return SagemakerEndpoint
    elif name == "StochasticAI":
        from langchain_community.llms import StochasticAI
        _warn_on_import(name, replacement="langchain_community.llms.StochasticAI")
        return StochasticAI
    elif name == "Writer":
        from langchain_community.llms import Writer
        _warn_on_import(name, replacement="langchain_community.llms.Writer")
        return Writer
    elif name == "HuggingFacePipeline":
        from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
        _warn_on_import(
            name,
            replacement="langchain_community.llms.huggingface_pipeline.HuggingFacePipeline",
        )
        return HuggingFacePipeline
    elif name == "FewShotPromptTemplate":
        from langchain_core.prompts import FewShotPromptTemplate
        _warn_on_import(
            name, replacement="langchain_core.prompts.FewShotPromptTemplate"
        )
        return FewShotPromptTemplate
    elif name == "Prompt":
        from langchain_core.prompts import PromptTemplate
        _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate")
        # it's renamed as prompt template anyways
        # this is just for backwards compat
        return PromptTemplate
    elif name == "PromptTemplate":
        from langchain_core.prompts import PromptTemplate
        _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate")
        return PromptTemplate
    elif name == "BasePromptTemplate":
        from langchain_core.prompts import BasePromptTemplate
        _warn_on_import(name, replacement="langchain_core.prompts.BasePromptTemplate")
        return BasePromptTemplate
    elif name == "ArxivAPIWrapper":
        from langchain_community.utilities import ArxivAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.ArxivAPIWrapper"
        )
        return ArxivAPIWrapper
    elif name == "GoldenQueryAPIWrapper":
        from langchain_community.utilities import GoldenQueryAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper"
        )
        return GoldenQueryAPIWrapper
    elif name == "GoogleSearchAPIWrapper":
        from langchain_community.utilities import GoogleSearchAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper"
        )
        return GoogleSearchAPIWrapper
    elif name == "GoogleSerperAPIWrapper":
        from langchain_community.utilities import GoogleSerperAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.GoogleSerperAPIWrapper"
        )
        return GoogleSerperAPIWrapper
    elif name == "PowerBIDataset":
        from langchain_community.utilities import PowerBIDataset
        _warn_on_import(
            name, replacement="langchain_community.utilities.PowerBIDataset"
        )
        return PowerBIDataset
    elif name == "SearxSearchWrapper":
        from langchain_community.utilities import SearxSearchWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.SearxSearchWrapper"
        )
        return SearxSearchWrapper
    elif name == "WikipediaAPIWrapper":
        from langchain_community.utilities import WikipediaAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.WikipediaAPIWrapper"
        )
        return WikipediaAPIWrapper
    elif name == "WolframAlphaAPIWrapper":
        from langchain_community.utilities import WolframAlphaAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper"
        )
        return WolframAlphaAPIWrapper
    elif name == "SQLDatabase":
        from langchain_community.utilities import SQLDatabase
        _warn_on_import(name, replacement="langchain_community.utilities.SQLDatabase")
        return SQLDatabase
    elif name == "FAISS":
        from langchain_community.vectorstores import FAISS
        _warn_on_import(name, replacement="langchain_community.vectorstores.FAISS")
        return FAISS
    elif name == "ElasticVectorSearch":
        from langchain_community.vectorstores import ElasticVectorSearch
        _warn_on_import(
            name, replacement="langchain_community.vectorstores.ElasticVectorSearch"
        )
        return ElasticVectorSearch
    # For backwards compatibility
    elif name == "SerpAPIChain" or name == "SerpAPIWrapper":
        from langchain_community.utilities import SerpAPIWrapper
        _warn_on_import(
            name, replacement="langchain_community.utilities.SerpAPIWrapper"
        )
        return SerpAPIWrapper
    elif name == "verbose":
        from langchain.globals import _verbose
        _warn_on_import(
            name,
            replacement=(
                "langchain.globals.set_verbose() / langchain.globals.get_verbose()"
            ),
        )
        return _verbose
    elif name == "debug":
        from langchain.globals import _debug
        _warn_on_import(
            name,
            replacement=(
                "langchain.globals.set_debug() / langchain.globals.get_debug()"
            ),
        )
        return _debug
    elif name == "llm_cache":
        from langchain.globals import _llm_cache
        _warn_on_import(
            name,
            replacement=(
                "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()"
            ),
        )
        return _llm_cache
    else:
        raise AttributeError(f"Could not find: {name}")
__all__ = [
    "LLMChain",
    "LLMCheckerChain",
    "LLMMathChain",
    "ArxivAPIWrapper",
    "GoldenQueryAPIWrapper",
    "SelfAskWithSearchChain",
    "SerpAPIWrapper",
    "SerpAPIChain",
    "SearxSearchWrapper",
    "GoogleSearchAPIWrapper",
    "GoogleSerperAPIWrapper",
    "WolframAlphaAPIWrapper",
    "WikipediaAPIWrapper",
    "Anthropic",
    "Banana",
    "CerebriumAI",
    "Cohere",
    "ForefrontAI",
    "GooseAI",
    "Modal",
    "OpenAI",
    "Petals",
    "PipelineAI",
    "StochasticAI",
    "Writer",
    "BasePromptTemplate",
    "Prompt",
    "FewShotPromptTemplate",
    "PromptTemplate",
    "ReActChain",
    "Wikipedia",
    "HuggingFaceHub",
    "SagemakerEndpoint",
    "HuggingFacePipeline",
    "SQLDatabase",
    "PowerBIDataset",
    "FAISS",
    "MRKLChain",
    "VectorDBQA",
    "ElasticVectorSearch",
    "InMemoryDocstore",
    "ConversationChain",
    "VectorDBQAWithSourcesChain",
    "QAWithSourcesChain",
    "LlamaCpp",
    "HuggingFaceTextGenInference",
]
 | 
	[
  "langchain.utils.interactive_env.is_interactive_env",
  "langchain_core._api.deprecation.surface_langchain_deprecation_warnings"
] | 
	[((1348, 1388), 'langchain_core._api.deprecation.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (1386, 1388), False, 'from langchain_core._api.deprecation import surface_langchain_deprecation_warnings\n'), ((243, 272), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (259, 272), False, 'from importlib import metadata\n'), ((658, 678), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (676, 678), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((990, 1119), 'warnings.warn', 'warnings.warn', (['f"""Importing {name} from langchain root module is no longer supported. Please use {replacement} instead."""'], {}), "(\n    f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instead.'\n    )\n", (1003, 1119), False, 'import warnings\n'), ((1166, 1256), 'warnings.warn', 'warnings.warn', (['f"""Importing {name} from langchain root module is no longer supported."""'], {}), "(\n    f'Importing {name} from langchain root module is no longer supported.')\n", (1179, 1256), False, 'import warnings\n')] | 
| 
	import os
from langchain_community.document_loaders import JSONLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_elasticsearch import ElasticsearchStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD")
ES_URL = os.getenv("ES_URL", "http://localhost:9200")
if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD:
    es_connection_details = {
        "es_cloud_id": ELASTIC_CLOUD_ID,
        "es_user": ELASTIC_USERNAME,
        "es_password": ELASTIC_PASSWORD,
    }
else:
    es_connection_details = {"es_url": ES_URL}
# Metadata extraction function
def metadata_func(record: dict, metadata: dict) -> dict:
    metadata["name"] = record.get("name")
    metadata["summary"] = record.get("summary")
    metadata["url"] = record.get("url")
    metadata["category"] = record.get("category")
    metadata["updated_at"] = record.get("updated_at")
    return metadata
## Load Data
loader = JSONLoader(
    file_path="./data/documents.json",
    jq_schema=".[]",
    content_key="content",
    metadata_func=metadata_func,
)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=250)
all_splits = text_splitter.split_documents(loader.load())
# Add to vectorDB
vectorstore = ElasticsearchStore.from_documents(
    documents=all_splits,
    embedding=HuggingFaceEmbeddings(
        model_name="all-MiniLM-L6-v2", model_kwargs={"device": "cpu"}
    ),
    **es_connection_details,
    index_name="workplace-search-example",
)
 | 
	[
  "langchain_community.embeddings.HuggingFaceEmbeddings",
  "langchain_community.document_loaders.JSONLoader",
  "langchain_text_splitters.RecursiveCharacterTextSplitter"
] | 
	[((279, 308), 'os.getenv', 'os.getenv', (['"""ELASTIC_CLOUD_ID"""'], {}), "('ELASTIC_CLOUD_ID')\n", (288, 308), False, 'import os\n'), ((328, 368), 'os.getenv', 'os.getenv', (['"""ELASTIC_USERNAME"""', '"""elastic"""'], {}), "('ELASTIC_USERNAME', 'elastic')\n", (337, 368), False, 'import os\n'), ((388, 417), 'os.getenv', 'os.getenv', (['"""ELASTIC_PASSWORD"""'], {}), "('ELASTIC_PASSWORD')\n", (397, 417), False, 'import os\n'), ((427, 471), 'os.getenv', 'os.getenv', (['"""ES_URL"""', '"""http://localhost:9200"""'], {}), "('ES_URL', 'http://localhost:9200')\n", (436, 471), False, 'import os\n'), ((1113, 1232), 'langchain_community.document_loaders.JSONLoader', 'JSONLoader', ([], {'file_path': '"""./data/documents.json"""', 'jq_schema': '""".[]"""', 'content_key': '"""content"""', 'metadata_func': 'metadata_func'}), "(file_path='./data/documents.json', jq_schema='.[]', content_key=\n    'content', metadata_func=metadata_func)\n", (1123, 1232), False, 'from langchain_community.document_loaders import JSONLoader\n'), ((1264, 1329), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(800)', 'chunk_overlap': '(250)'}), '(chunk_size=800, chunk_overlap=250)\n', (1294, 1329), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((1496, 1584), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='all-MiniLM-L6-v2', model_kwargs={'device':\n    'cpu'})\n", (1517, 1584), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n')] | 
| 
	import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
    _JS_SERIALIZABLE_MAPPING,
    _OG_SERIALIZABLE_MAPPING,
    OLD_CORE_NAMESPACES_MAPPING,
    SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializable import Serializable
DEFAULT_NAMESPACES = ["langchain", "langchain_core", "langchain_community"]
ALL_SERIALIZABLE_MAPPINGS = {
    **SERIALIZABLE_MAPPING,
    **OLD_CORE_NAMESPACES_MAPPING,
    **_OG_SERIALIZABLE_MAPPING,
    **_JS_SERIALIZABLE_MAPPING,
}
class Reviver:
    """Reviver for JSON objects."""
    def __init__(
        self,
        secrets_map: Optional[Dict[str, str]] = None,
        valid_namespaces: Optional[List[str]] = None,
    ) -> None:
        self.secrets_map = secrets_map or dict()
        # By default only support langchain, but user can pass in additional namespaces
        self.valid_namespaces = (
            [*DEFAULT_NAMESPACES, *valid_namespaces]
            if valid_namespaces
            else DEFAULT_NAMESPACES
        )
    def __call__(self, value: Dict[str, Any]) -> Any:
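        # Resolve serialized secrets from the secrets map, falling back to environment variables.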
        if (
            value.get("lc", None) == 1
            and value.get("type", None) == "secret"
            and value.get("id", None) is not None
        ):
            [key] = value["id"]
            if key in self.secrets_map:
                return self.secrets_map[key]
            else:
                if key in os.environ and os.environ[key]:
                    return os.environ[key]
                raise KeyError(f'Missing key "{key}" in load(secrets_map)')
        if (
            value.get("lc", None) == 1
            and value.get("type", None) == "not_implemented"
            and value.get("id", None) is not None
        ):
            raise NotImplementedError(
                "Trying to load an object that doesn't implement "
                f"serialization: {value}"
            )
        if (
            value.get("lc", None) == 1
            and value.get("type", None) == "constructor"
            and value.get("id", None) is not None
        ):
            [*namespace, name] = value["id"]
            if namespace[0] not in self.valid_namespaces:
                raise ValueError(f"Invalid namespace: {value}")
            # The root namespace "langchain" is not a valid identifier.
            if len(namespace) == 1 and namespace[0] == "langchain":
                raise ValueError(f"Invalid namespace: {value}")
            # If namespace is in known namespaces, try to use mapping
            if namespace[0] in DEFAULT_NAMESPACES:
                # Get the importable path
                key = tuple(namespace + [name])
                if key not in ALL_SERIALIZABLE_MAPPINGS:
                    raise ValueError(
                        "Trying to deserialize something that cannot "
                        "be deserialized in current version of langchain-core: "
                        f"{key}"
                    )
                import_path = ALL_SERIALIZABLE_MAPPINGS[key]
                # Split into module and name
                import_dir, import_obj = import_path[:-1], import_path[-1]
                # Import module
                mod = importlib.import_module(".".join(import_dir))
                # Import class
                cls = getattr(mod, import_obj)
            # Otherwise, load by path
            else:
                mod = importlib.import_module(".".join(namespace))
                cls = getattr(mod, name)
            # The class must be a subclass of Serializable.
            if not issubclass(cls, Serializable):
                raise ValueError(f"Invalid namespace: {value}")
            # We don't need to recurse on kwargs
            # as json.loads will do that for us.
            kwargs = value.get("kwargs", dict())
            return cls(**kwargs)
        return value
@beta()
def loads(
    text: str,
    *,
    secrets_map: Optional[Dict[str, str]] = None,
    valid_namespaces: Optional[List[str]] = None,
) -> Any:
    """Revive a LangChain class from a JSON string.
    Equivalent to `load(json.loads(text))`.
    Args:
        text: The string to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized.
    Returns:
        Revived LangChain objects.
    """
    return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
@beta()
def load(
    obj: Any,
    *,
    secrets_map: Optional[Dict[str, str]] = None,
    valid_namespaces: Optional[List[str]] = None,
) -> Any:
    """Revive a LangChain class from a JSON object. Use this if you already
    have a parsed JSON object, e.g. from `json.load` or `orjson.loads`.
    Args:
        obj: The object to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized.
    Returns:
        Revived LangChain objects.
    """
    reviver = Reviver(secrets_map, valid_namespaces)
    def _load(obj: Any) -> Any:
        if isinstance(obj, dict):
            # Need to revive leaf nodes before reviving this node
            loaded_obj = {k: _load(v) for k, v in obj.items()}
            return reviver(loaded_obj)
        if isinstance(obj, list):
            return [_load(o) for o in obj]
        return obj
    return _load(obj)
 | 
	[
  "langchain_core._api.beta"
] | 
	[((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] | 
| 
	from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
    KnowledgeTriple,
    get_entities,
    parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
    ENTITY_EXTRACTION_PROMPT,
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
    """Knowledge graph conversation memory.
    Integrates with an external knowledge graph to store and retrieve
    information about knowledge triples in the conversation.
    """
    k: int = 2
    """Number of previous utterances to include in the context."""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLanguageModel
    summary_message_cls: Type[BaseMessage] = SystemMessage
    memory_key: str = "history"  #: :meta private:
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        entities = self._get_current_entities(inputs)
        summary_strings = []
        for entity in entities:
            knowledge = self.kg.get_entity_knowledge(entity)
            if knowledge:
                summary = f"On {entity}: {'. '.join(knowledge)}."
                summary_strings.append(summary)
        context: Union[str, List]
        if not summary_strings:
            context = [] if self.return_messages else ""
        elif self.return_messages:
            context = [
                self.summary_message_cls(content=text) for text in summary_strings
            ]
        else:
            context = "\n".join(summary_strings)
        return {self.memory_key: context}
    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.
        :meta private:
        """
        return [self.memory_key]
    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key
    def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
        """Get the output key for the prompt."""
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            return list(outputs.keys())[0]
        return self.output_key
    def get_current_entities(self, input_string: str) -> List[str]:
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
        )
        return get_entities(output)
    def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
        """Get the current entities in the conversation."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        return self.get_current_entities(inputs[prompt_input_key])
    def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
        chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
            verbose=True,
        )
        knowledge = parse_triples(output)
        return knowledge
    def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
        """Get and update knowledge graph from the conversation history."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
        for triple in knowledge:
            self.kg.add_triple(triple)
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self._get_and_update_kg(inputs)
    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.kg.clear()
 | 
	[
  "langchain_community.graphs.networkx_graph.get_entities",
  "langchain.chains.llm.LLMChain",
  "langchain.memory.utils.get_prompt_input_key",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.messages.get_buffer_string",
  "langchain_community.graphs.networkx_graph.parse_triples"
] | 
	[((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n    self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n    self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')] | 
| 
	"""**Retriever** class returns Documents given a text **query**.
It is more general than a vector store. A retriever does not need to be able to
store documents, only to return (or retrieve) them. Vector stores can be used as
the backbone of a retriever, but there are other types of retrievers as well.
**Class hierarchy:**
.. code-block::
    BaseRetriever --> <name>Retriever  # Examples: ArxivRetriever, MergerRetriever
**Main helpers:**
.. code-block::
    RetrieverInput, RetrieverOutput, RetrieverLike, RetrieverOutputLike,
    Document, Serializable, Callbacks,
    CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.documents import Document
from langchain_core.load.dump import dumpd
from langchain_core.runnables import (
    Runnable,
    RunnableConfig,
    RunnableSerializable,
    ensure_config,
)
from langchain_core.runnables.config import run_in_executor
if TYPE_CHECKING:
    from langchain_core.callbacks.manager import (
        AsyncCallbackManagerForRetrieverRun,
        CallbackManagerForRetrieverRun,
        Callbacks,
    )
RetrieverInput = str
RetrieverOutput = List[Document]
RetrieverLike = Runnable[RetrieverInput, RetrieverOutput]
RetrieverOutputLike = Runnable[Any, RetrieverOutput]
class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
    """Abstract base class for a Document retrieval system.
    A retrieval system is defined as something that can take string queries and return
        the most 'relevant' Documents from some source.
    Example:
        .. code-block:: python
            class TFIDFRetriever(BaseRetriever, BaseModel):
                vectorizer: Any
                docs: List[Document]
                tfidf_array: Any
                k: int = 4
                class Config:
                    arbitrary_types_allowed = True
                def get_relevant_documents(self, query: str) -> List[Document]:
                    from sklearn.metrics.pairwise import cosine_similarity
                    # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
                    query_vec = self.vectorizer.transform([query])
                    # Op -- (n_docs,1) -- Cosine Sim with each doc
                    results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
                    return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
    """  # noqa: E501
    class Config:
        """Configuration for this pydantic object."""
        arbitrary_types_allowed = True
    _new_arg_supported: bool = False
    _expects_other_args: bool = False
    tags: Optional[List[str]] = None
    """Optional list of tags associated with the retriever. Defaults to None
    These tags will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to e.g. identify a specific instance of a retriever with its
    use case.
    """
    metadata: Optional[Dict[str, Any]] = None
    """Optional metadata associated with the retriever. Defaults to None
    This metadata will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to e.g. identify a specific instance of a retriever with its
    use case.
    """
    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        # Version upgrade for old retrievers that implemented the public
        # methods directly.
        if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
            warnings.warn(
                "Retrievers must implement abstract `_get_relevant_documents` method"
                " instead of `get_relevant_documents`",
                DeprecationWarning,
            )
            swap = cls.get_relevant_documents
            cls.get_relevant_documents = (  # type: ignore[assignment]
                BaseRetriever.get_relevant_documents
            )
            cls._get_relevant_documents = swap  # type: ignore[assignment]
        if (
            hasattr(cls, "aget_relevant_documents")
            and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
        ):
            warnings.warn(
                "Retrievers must implement abstract `_aget_relevant_documents` method"
                " instead of `aget_relevant_documents`",
                DeprecationWarning,
            )
            aswap = cls.aget_relevant_documents
            cls.aget_relevant_documents = (  # type: ignore[assignment]
                BaseRetriever.aget_relevant_documents
            )
            cls._aget_relevant_documents = aswap  # type: ignore[assignment]
        parameters = signature(cls._get_relevant_documents).parameters
        cls._new_arg_supported = parameters.get("run_manager") is not None
        # If a V1 retriever broke the interface and expects additional arguments
        cls._expects_other_args = (
            len(set(parameters.keys()) - {"self", "query", "run_manager"}) > 0
        )
    def invoke(
        self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> List[Document]:
        config = ensure_config(config)
        return self.get_relevant_documents(
            input,
            callbacks=config.get("callbacks"),
            tags=config.get("tags"),
            metadata=config.get("metadata"),
            run_name=config.get("run_name"),
            **kwargs,
        )
    async def ainvoke(
        self,
        input: str,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> List[Document]:
        config = ensure_config(config)
        return await self.aget_relevant_documents(
            input,
            callbacks=config.get("callbacks"),
            tags=config.get("tags"),
            metadata=config.get("metadata"),
            run_name=config.get("run_name"),
            **kwargs,
        )
    @abstractmethod
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant to a query.
        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use
        Returns:
            List of relevant documents
        """
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.
        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use
        Returns:
            List of relevant documents
        """
        return await run_in_executor(
            None,
            self._get_relevant_documents,
            query,
            run_manager=run_manager.get_sync(),
        )
    def get_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        run_name: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Retrieve documents relevant to a query.
        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever. Defaults to None
                These tags will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
            metadata: Optional metadata associated with the retriever. Defaults to None
                This metadata will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
        Returns:
            List of relevant documents
        """
        from langchain_core.callbacks.manager import CallbackManager
        callback_manager = CallbackManager.configure(
            callbacks,
            None,
            verbose=kwargs.get("verbose", False),
            inheritable_tags=tags,
            local_tags=self.tags,
            inheritable_metadata=metadata,
            local_metadata=self.metadata,
        )
        run_manager = callback_manager.on_retriever_start(
            dumpd(self),
            query,
            name=run_name,
            run_id=kwargs.pop("run_id", None),
        )
        try:
            _kwargs = kwargs if self._expects_other_args else {}
            if self._new_arg_supported:
                result = self._get_relevant_documents(
                    query, run_manager=run_manager, **_kwargs
                )
            else:
                result = self._get_relevant_documents(query, **_kwargs)
        except Exception as e:
            run_manager.on_retriever_error(e)
            raise e
        else:
            run_manager.on_retriever_end(
                result,
            )
            return result
    async def aget_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        run_name: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.
        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever. Defaults to None
                These tags will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
            metadata: Optional metadata associated with the retriever. Defaults to None
                This metadata will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
        Returns:
            List of relevant documents
        """
        from langchain_core.callbacks.manager import AsyncCallbackManager
        callback_manager = AsyncCallbackManager.configure(
            callbacks,
            None,
            verbose=kwargs.get("verbose", False),
            inheritable_tags=tags,
            local_tags=self.tags,
            inheritable_metadata=metadata,
            local_metadata=self.metadata,
        )
        run_manager = await callback_manager.on_retriever_start(
            dumpd(self),
            query,
            name=run_name,
            run_id=kwargs.pop("run_id", None),
        )
        try:
            _kwargs = kwargs if self._expects_other_args else {}
            if self._new_arg_supported:
                result = await self._aget_relevant_documents(
                    query, run_manager=run_manager, **_kwargs
                )
            else:
                result = await self._aget_relevant_documents(query, **_kwargs)
        except Exception as e:
            await run_manager.on_retriever_error(e)
            raise e
        else:
            await run_manager.on_retriever_end(
                result,
            )
            return result
 | 
	[
  "langchain_core.runnables.ensure_config",
  "langchain_core.load.dump.dumpd"
] | 
	[((5405, 5426), 'langchain_core.runnables.ensure_config', 'ensure_config', (['config'], {}), '(config)\n', (5418, 5426), False, 'from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable, ensure_config\n'), ((5868, 5889), 'langchain_core.runnables.ensure_config', 'ensure_config', (['config'], {}), '(config)\n', (5881, 5889), False, 'from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable, ensure_config\n'), ((3800, 3950), 'warnings.warn', 'warnings.warn', (['"""Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`"""', 'DeprecationWarning'], {}), "(\n    'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`'\n    , DeprecationWarning)\n", (3813, 3950), False, 'import warnings\n'), ((4439, 4591), 'warnings.warn', 'warnings.warn', (['"""Retrievers must implement abstract `_aget_relevant_documents` method instead of `aget_relevant_documents`"""', 'DeprecationWarning'], {}), "(\n    'Retrievers must implement abstract `_aget_relevant_documents` method instead of `aget_relevant_documents`'\n    , DeprecationWarning)\n", (4452, 4591), False, 'import warnings\n'), ((4934, 4972), 'inspect.signature', 'signature', (['cls._get_relevant_documents'], {}), '(cls._get_relevant_documents)\n', (4943, 4972), False, 'from inspect import signature\n'), ((8626, 8637), 'langchain_core.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (8631, 8637), False, 'from langchain_core.load.dump import dumpd\n'), ((10831, 10842), 'langchain_core.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (10836, 10842), False, 'from langchain_core.load.dump import dumpd\n')] | 
| 
	# Ingest Documents into a Zep Collection
import os
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000")
ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None)
ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION", "langchaintest")
collection_config = CollectionConfig(
    name=ZEP_COLLECTION_NAME,
    description="Zep collection for LangChain",
    metadata={},
    embedding_dimensions=1536,
    is_auto_embedded=True,
)
# Load
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
# Add to vectorDB
vectorstore = ZepVectorStore.from_documents(
    documents=all_splits,
    collection_name=ZEP_COLLECTION_NAME,
    config=collection_config,
    api_url=ZEP_API_URL,
    api_key=ZEP_API_KEY,
    embedding=FakeEmbeddings(size=1),
)
 | 
	[
  "langchain_community.document_loaders.WebBaseLoader",
  "langchain_community.embeddings.FakeEmbeddings",
  "langchain_community.vectorstores.zep.CollectionConfig",
  "langchain_text_splitters.RecursiveCharacterTextSplitter"
] | 
	[((338, 392), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""', '"""http://localhost:8000"""'], {}), "('ZEP_API_URL', 'http://localhost:8000')\n", (352, 392), False, 'import os\n'), ((407, 442), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""', 'None'], {}), "('ZEP_API_KEY', None)\n", (421, 442), False, 'import os\n'), ((465, 514), 'os.environ.get', 'os.environ.get', (['"""ZEP_COLLECTION"""', '"""langchaintest"""'], {}), "('ZEP_COLLECTION', 'langchaintest')\n", (479, 514), False, 'import os\n'), ((536, 694), 'langchain_community.vectorstores.zep.CollectionConfig', 'CollectionConfig', ([], {'name': 'ZEP_COLLECTION_NAME', 'description': '"""Zep collection for LangChain"""', 'metadata': '{}', 'embedding_dimensions': '(1536)', 'is_auto_embedded': '(True)'}), "(name=ZEP_COLLECTION_NAME, description=\n    'Zep collection for LangChain', metadata={}, embedding_dimensions=1536,\n    is_auto_embedded=True)\n", (552, 694), False, 'from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore\n'), ((726, 795), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://lilianweng.github.io/posts/2023-06-23-agent/"""'], {}), "('https://lilianweng.github.io/posts/2023-06-23-agent/')\n", (739, 795), False, 'from langchain_community.document_loaders import WebBaseLoader\n'), ((842, 905), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (872, 905), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((1180, 1202), 'langchain_community.embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {'size': '(1)'}), '(size=1)\n', (1194, 1202), False, 'from langchain_community.embeddings import FakeEmbeddings\n')] | 
| 
	from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
template = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, you can respond as normal to the user.
Example 1:
Human: Hi!
Assistant: Hi! How are you?
Human: What is the weather in SF?
Assistant: <tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
It is 64 degrees in SF
Begin!"""  # noqa: E501
conversational_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", template),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{question}"),
        ("ai", "{agent_scratchpad}"),
    ]
)
def parse_output(message):
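    """Parse the model's message into an AgentAction (tool call) or an AgentFinish."""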
    text = message.content
    if "</tool>" in text:
        tool, tool_input = text.split("</tool>")
        _tool = tool.split("<tool>")[1]
        _tool_input = tool_input.split("<tool_input>")[1]
        if "</tool_input>" in _tool_input:
            _tool_input = _tool_input.split("</tool_input>")[0]
        return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
    else:
        return AgentFinish(return_values={"output": text}, log=text)
 | 
	[
  "langchain_core.prompts.MessagesPlaceholder",
  "langchain_core.agents.AgentAction",
  "langchain_core.agents.AgentFinish"
] | 
	[((1068, 1117), 'langchain_core.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (1087, 1117), False, 'from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n'), ((1548, 1605), 'langchain_core.agents.AgentAction', 'AgentAction', ([], {'tool': '_tool', 'tool_input': '_tool_input', 'log': 'text'}), '(tool=_tool, tool_input=_tool_input, log=text)\n', (1559, 1605), False, 'from langchain_core.agents import AgentAction, AgentFinish\n'), ((1631, 1684), 'langchain_core.agents.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': text}", 'log': 'text'}), "(return_values={'output': text}, log=text)\n", (1642, 1684), False, 'from langchain_core.agents import AgentAction, AgentFinish\n')] | 
| 
	from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM 
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
    WITH row
    MERGE (m:Movie {id:row.movieId})
    SET m.released = date(row.released),
        m.title = row.title,
        m.imdbRating = toFloat(row.imdbRating)
    FOREACH (director in split(row.director, '|') | 
        MERGE (p:Person {name:trim(director)})
        MERGE (p)-[:DIRECTED]->(m))
    FOREACH (actor in split(row.actors, '|') | 
        MERGE (p:Person {name:trim(actor)})
        MERGE (p)-[:ACTED_IN]->(m))
    FOREACH (genre in split(row.genres, '|') | 
        MERGE (g:Genre {name:trim(genre)})
        MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM 
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
    WITH row
    MATCH (m:Movie {id:row.movieId})
    MERGE (u:User {id:row.userId})
    MERGE (u)-[r:RATED]->(m)
    SET r.rating = toFloat(row.rating),
        r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
    "CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
 | 
	[
  "langchain_community.graphs.Neo4jGraph"
] | 
	[((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] | 
| 
	"""Tool for the Exa Search API."""
from typing import Dict, List, Optional, Union
from exa_py import Exa  # type: ignore
from exa_py.api import HighlightsContentsOptions, TextContentsOptions  # type: ignore
from langchain_core.callbacks import (
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.tools import BaseTool
from langchain_exa._utilities import initialize_client
class ExaSearchResults(BaseTool):
    """Tool that queries the Metaphor Search API and gets back json."""
    name: str = "exa_search_results_json"
    description: str = (
        "A wrapper around Exa Search. "
        "Input should be an Exa-optimized query. "
        "Output is a JSON array of the query results"
    )
    client: Exa = Field(default=None)
    exa_api_key: SecretStr = Field(default=None)
    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate the environment."""
        values = initialize_client(values)
        return values
    def _run(
        self,
        query: str,
        num_results: int,
        text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
        highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool."""
        try:
            return self.client.search_and_contents(
                query,
                num_results=num_results,
                text=text_contents_options,  # type: ignore
                highlights=highlights,  # type: ignore
                include_domains=include_domains,
                exclude_domains=exclude_domains,
                start_crawl_date=start_crawl_date,
                end_crawl_date=end_crawl_date,
                start_published_date=start_published_date,
                end_published_date=end_published_date,
                use_autoprompt=use_autoprompt,
            )  # type: ignore
        except Exception as e:
            return repr(e)
class ExaFindSimilarResults(BaseTool):
    """Tool that queries the Metaphor Search API and gets back json."""
    name: str = "exa_find_similar_results_json"
    description: str = (
        "A wrapper around Exa Find Similar. "
        "Input should be an Exa-optimized query. "
        "Output is a JSON array of the query results"
    )
    client: Exa = Field(default=None)
    exa_api_key: SecretStr = Field(default=None)
    exa_base_url: Optional[str] = None
    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate the environment."""
        values = initialize_client(values)
        return values
    def _run(
        self,
        url: str,
        num_results: int,
        text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
        highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        exclude_source_domain: Optional[bool] = None,
        category: Optional[str] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool."""
        try:
            return self.client.find_similar_and_contents(
                url,
                num_results=num_results,
                text=text_contents_options,  # type: ignore
                highlights=highlights,  # type: ignore
                include_domains=include_domains,
                exclude_domains=exclude_domains,
                start_crawl_date=start_crawl_date,
                end_crawl_date=end_crawl_date,
                start_published_date=start_published_date,
                end_published_date=end_published_date,
                exclude_source_domain=exclude_source_domain,
                category=category,
            )  # type: ignore
        except Exception as e:
            return repr(e)
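# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# Assumes a valid Exa API key is exported as EXA_API_KEY so that
# initialize_client() can build the client; the query text and num_results
# value below are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    search_tool = ExaSearchResults()
    results = search_tool.invoke(
        {"query": "state of the art in retrieval-augmented generation", "num_results": 3}
    )
    print(results)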
 | 
	[
  "langchain_exa._utilities.initialize_client",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.pydantic_v1.root_validator"
] | 
	[((796, 815), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (801, 815), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((845, 864), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (850, 864), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((871, 895), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (885, 895), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2818, 2837), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2823, 2837), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2867, 2886), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2872, 2886), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2932, 2956), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2946, 2956), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((1010, 1035), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (1027, 1035), False, 'from langchain_exa._utilities import initialize_client\n'), ((3071, 3096), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (3088, 3096), False, 'from langchain_exa._utilities import initialize_client\n')] | 
| 
	from typing import Any, List, Literal
from langchain_core.messages.base import (
    BaseMessage,
    BaseMessageChunk,
    merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
    """Message that can be assigned an arbitrary speaker (i.e. role)."""
    role: str
    """The speaker / role of the Message."""
    type: Literal["chat"] = "chat"
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    """Chat Message chunk."""
    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ChatMessageChunk):
            if self.role != other.role:
                raise ValueError(
                    "Cannot concatenate ChatMessageChunks with different roles."
                )
            return self.__class__(
                role=self.role,
                content=merge_content(self.content, other.content),
                additional_kwargs=merge_dicts(
                    self.additional_kwargs, other.additional_kwargs
                ),
                response_metadata=merge_dicts(
                    self.response_metadata, other.response_metadata
                ),
            )
        elif isinstance(other, BaseMessageChunk):
            return self.__class__(
                role=self.role,
                content=merge_content(self.content, other.content),
                additional_kwargs=merge_dicts(
                    self.additional_kwargs, other.additional_kwargs
                ),
                response_metadata=merge_dicts(
                    self.response_metadata, other.response_metadata
                ),
            )
        else:
            return super().__add__(other)
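# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# Streaming yields partial ChatMessageChunks; adding two chunks with the same
# role merges their content, additional_kwargs, and response_metadata.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    merged = ChatMessageChunk(role="assistant", content="Hello, ") + ChatMessageChunk(
        role="assistant", content="world!"
    )
    assert merged.role == "assistant"
    assert merged.content == "Hello, world!"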
 | 
	[
  "langchain_core.messages.base.merge_content",
  "langchain_core.utils._merge.merge_dicts"
] | 
	[((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (1579, 1628), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1702, 1762), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (1713, 1762), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1957, 1999), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1970, 1999), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((2035, 2095), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (2046, 2095), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((2169, 2229), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (2180, 2229), False, 'from langchain_core.utils._merge import merge_dicts\n')] | 
| 
	from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """An image prompt template for a multimodal model."""
    template: dict = Field(default_factory=dict)
    """Template for the prompt."""
    def __init__(self, **kwargs: Any) -> None:
        if "input_variables" not in kwargs:
            kwargs["input_variables"] = []
        overlap = set(kwargs["input_variables"]) & set(("url", "path", "detail"))
        if overlap:
            raise ValueError(
                "input_variables for the image template cannot contain"
                " any of 'url', 'path', or 'detail'."
                f" Found: {overlap}"
            )
        super().__init__(**kwargs)
    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "image-prompt"
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "image"]
    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return ImagePromptValue(image_url=self.format(**kwargs))
    def format(
        self,
        **kwargs: Any,
    ) -> ImageURL:
        """Format the prompt with the inputs.
        Args:
            kwargs: Any arguments to be passed to the prompt template.
        Returns:
            A formatted string.
        Example:
            .. code-block:: python
                prompt.format(variable1="foo")
        """
        formatted = {}
        for k, v in self.template.items():
            if isinstance(v, str):
                formatted[k] = v.format(**kwargs)
            else:
                formatted[k] = v
        url = kwargs.get("url") or formatted.get("url")
        path = kwargs.get("path") or formatted.get("path")
        detail = kwargs.get("detail") or formatted.get("detail")
        if not url and not path:
            raise ValueError("Must provide either url or path.")
        if not url:
            if not isinstance(path, str):
                raise ValueError("path must be a string.")
            url = image_utils.image_to_data_url(path)
        if not isinstance(url, str):
            raise ValueError("url must be a string.")
        output: ImageURL = {"url": url}
        if detail:
            # Don't check literal values here: let the API check them
            output["detail"] = detail  # type: ignore[typeddict-item]
        return output
    def pretty_repr(self, html: bool = False) -> str:
        raise NotImplementedError()
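# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# Template values may themselves be format strings; format() fills them in
# and returns an ImageURL dict. The "image_url" variable name is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    image_prompt = ImagePromptTemplate(
        template={"url": "{image_url}", "detail": "low"},
        input_variables=["image_url"],
    )
    image_url = image_prompt.format(image_url="https://example.com/cat.png")
    # -> {"url": "https://example.com/cat.png", "detail": "low"}
    print(image_url)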
 | 
	[
  "langchain_core.pydantic_v1.Field",
  "langchain_core.utils.image.image_to_data_url"
] | 
	[((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (2420, 2426), True, 'from langchain_core.utils import image as image_utils\n')] | 
| 
	"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
    BaseLanguageModel --> BaseLLM --> LLM --> <name>  # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
    LLMResult, PromptValue,
    CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
    CallbackManager, AsyncCallbackManager,
    AIMessage, BaseMessage
"""  # noqa: E501
import warnings
from typing import Any, Callable, Dict, Type
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.language_models.llms import BaseLLM
from langchain.utils.interactive_env import is_interactive_env
def _import_ai21() -> Any:
    from langchain_community.llms.ai21 import AI21
    return AI21
def _import_aleph_alpha() -> Any:
    from langchain_community.llms.aleph_alpha import AlephAlpha
    return AlephAlpha
def _import_amazon_api_gateway() -> Any:
    from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
    return AmazonAPIGateway
def _import_anthropic() -> Any:
    from langchain_community.llms.anthropic import Anthropic
    return Anthropic
def _import_anyscale() -> Any:
    from langchain_community.llms.anyscale import Anyscale
    return Anyscale
def _import_arcee() -> Any:
    from langchain_community.llms.arcee import Arcee
    return Arcee
def _import_aviary() -> Any:
    from langchain_community.llms.aviary import Aviary
    return Aviary
def _import_azureml_endpoint() -> Any:
    from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
    return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
    from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
    return QianfanLLMEndpoint
def _import_bananadev() -> Any:
    from langchain_community.llms.bananadev import Banana
    return Banana
def _import_baseten() -> Any:
    from langchain_community.llms.baseten import Baseten
    return Baseten
def _import_beam() -> Any:
    from langchain_community.llms.beam import Beam
    return Beam
def _import_bedrock() -> Any:
    from langchain_community.llms.bedrock import Bedrock
    return Bedrock
def _import_bittensor() -> Any:
    from langchain_community.llms.bittensor import NIBittensorLLM
    return NIBittensorLLM
def _import_cerebriumai() -> Any:
    from langchain_community.llms.cerebriumai import CerebriumAI
    return CerebriumAI
def _import_chatglm() -> Any:
    from langchain_community.llms.chatglm import ChatGLM
    return ChatGLM
def _import_clarifai() -> Any:
    from langchain_community.llms.clarifai import Clarifai
    return Clarifai
def _import_cohere() -> Any:
    from langchain_community.llms.cohere import Cohere
    return Cohere
def _import_ctransformers() -> Any:
    from langchain_community.llms.ctransformers import CTransformers
    return CTransformers
def _import_ctranslate2() -> Any:
    from langchain_community.llms.ctranslate2 import CTranslate2
    return CTranslate2
def _import_databricks() -> Any:
    from langchain_community.llms.databricks import Databricks
    return Databricks
def _import_databricks_chat() -> Any:
    from langchain_community.chat_models.databricks import ChatDatabricks
    return ChatDatabricks
def _import_deepinfra() -> Any:
    from langchain_community.llms.deepinfra import DeepInfra
    return DeepInfra
def _import_deepsparse() -> Any:
    from langchain_community.llms.deepsparse import DeepSparse
    return DeepSparse
def _import_edenai() -> Any:
    from langchain_community.llms.edenai import EdenAI
    return EdenAI
def _import_fake() -> Any:
    from langchain_community.llms.fake import FakeListLLM
    return FakeListLLM
def _import_fireworks() -> Any:
    from langchain_community.llms.fireworks import Fireworks
    return Fireworks
def _import_forefrontai() -> Any:
    from langchain_community.llms.forefrontai import ForefrontAI
    return ForefrontAI
def _import_gigachat() -> Any:
    from langchain_community.llms.gigachat import GigaChat
    return GigaChat
def _import_google_palm() -> Any:
    from langchain_community.llms.google_palm import GooglePalm
    return GooglePalm
def _import_gooseai() -> Any:
    from langchain_community.llms.gooseai import GooseAI
    return GooseAI
def _import_gpt4all() -> Any:
    from langchain_community.llms.gpt4all import GPT4All
    return GPT4All
def _import_gradient_ai() -> Any:
    from langchain_community.llms.gradient_ai import GradientLLM
    return GradientLLM
def _import_huggingface_endpoint() -> Any:
    from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
    return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
    from langchain_community.llms.huggingface_hub import HuggingFaceHub
    return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
    from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
    return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
    from langchain_community.llms.huggingface_text_gen_inference import (
        HuggingFaceTextGenInference,
    )
    return HuggingFaceTextGenInference
def _import_human() -> Any:
    from langchain_community.llms.human import HumanInputLLM
    return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
    from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
    return JavelinAIGateway
def _import_koboldai() -> Any:
    from langchain_community.llms.koboldai import KoboldApiLLM
    return KoboldApiLLM
def _import_llamacpp() -> Any:
    from langchain_community.llms.llamacpp import LlamaCpp
    return LlamaCpp
def _import_manifest() -> Any:
    from langchain_community.llms.manifest import ManifestWrapper
    return ManifestWrapper
def _import_minimax() -> Any:
    from langchain_community.llms.minimax import Minimax
    return Minimax
def _import_mlflow() -> Any:
    from langchain_community.llms.mlflow import Mlflow
    return Mlflow
def _import_mlflow_chat() -> Any:
    from langchain_community.chat_models.mlflow import ChatMlflow
    return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
    from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
    return MlflowAIGateway
def _import_modal() -> Any:
    from langchain_community.llms.modal import Modal
    return Modal
def _import_mosaicml() -> Any:
    from langchain_community.llms.mosaicml import MosaicML
    return MosaicML
def _import_nlpcloud() -> Any:
    from langchain_community.llms.nlpcloud import NLPCloud
    return NLPCloud
def _import_octoai_endpoint() -> Any:
    from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
    return OctoAIEndpoint
def _import_ollama() -> Any:
    from langchain_community.llms.ollama import Ollama
    return Ollama
def _import_opaqueprompts() -> Any:
    from langchain_community.llms.opaqueprompts import OpaquePrompts
    return OpaquePrompts
def _import_azure_openai() -> Any:
    from langchain_community.llms.openai import AzureOpenAI
    return AzureOpenAI
def _import_openai() -> Any:
    from langchain_community.llms.openai import OpenAI
    return OpenAI
def _import_openai_chat() -> Any:
    from langchain_community.llms.openai import OpenAIChat
    return OpenAIChat
def _import_openllm() -> Any:
    from langchain_community.llms.openllm import OpenLLM
    return OpenLLM
def _import_openlm() -> Any:
    from langchain_community.llms.openlm import OpenLM
    return OpenLM
def _import_pai_eas_endpoint() -> Any:
    from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
    return PaiEasEndpoint
def _import_petals() -> Any:
    from langchain_community.llms.petals import Petals
    return Petals
def _import_pipelineai() -> Any:
    from langchain_community.llms.pipelineai import PipelineAI
    return PipelineAI
def _import_predibase() -> Any:
    from langchain_community.llms.predibase import Predibase
    return Predibase
def _import_predictionguard() -> Any:
    from langchain_community.llms.predictionguard import PredictionGuard
    return PredictionGuard
def _import_promptlayer() -> Any:
    from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
    return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
    from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
    return PromptLayerOpenAIChat
def _import_replicate() -> Any:
    from langchain_community.llms.replicate import Replicate
    return Replicate
def _import_rwkv() -> Any:
    from langchain_community.llms.rwkv import RWKV
    return RWKV
def _import_sagemaker_endpoint() -> Any:
    from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
    return SagemakerEndpoint
def _import_self_hosted() -> Any:
    from langchain_community.llms.self_hosted import SelfHostedPipeline
    return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
    from langchain_community.llms.self_hosted_hugging_face import (
        SelfHostedHuggingFaceLLM,
    )
    return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
    from langchain_community.llms.stochasticai import StochasticAI
    return StochasticAI
def _import_symblai_nebula() -> Any:
    from langchain_community.llms.symblai_nebula import Nebula
    return Nebula
def _import_textgen() -> Any:
    from langchain_community.llms.textgen import TextGen
    return TextGen
def _import_titan_takeoff() -> Any:
    from langchain_community.llms.titan_takeoff import TitanTakeoff
    return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
    from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
    return TitanTakeoffPro
def _import_together() -> Any:
    from langchain_community.llms.together import Together
    return Together
def _import_tongyi() -> Any:
    from langchain_community.llms.tongyi import Tongyi
    return Tongyi
def _import_vertex() -> Any:
    from langchain_community.llms.vertexai import VertexAI
    return VertexAI
def _import_vertex_model_garden() -> Any:
    from langchain_community.llms.vertexai import VertexAIModelGarden
    return VertexAIModelGarden
def _import_vllm() -> Any:
    from langchain_community.llms.vllm import VLLM
    return VLLM
def _import_vllm_openai() -> Any:
    from langchain_community.llms.vllm import VLLMOpenAI
    return VLLMOpenAI
def _import_watsonxllm() -> Any:
    from langchain_community.llms.watsonxllm import WatsonxLLM
    return WatsonxLLM
def _import_writer() -> Any:
    from langchain_community.llms.writer import Writer
    return Writer
def _import_xinference() -> Any:
    from langchain_community.llms.xinference import Xinference
    return Xinference
def _import_yandex_gpt() -> Any:
    from langchain_community.llms.yandex import YandexGPT
    return YandexGPT
def _import_volcengine_maas() -> Any:
    from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
    return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
    from langchain_community import llms
    # If not in interactive env, raise warning.
    if not is_interactive_env():
        warnings.warn(
            "Importing LLMs from langchain is deprecated. Importing from "
            "langchain will no longer be supported as of langchain==0.2.0. "
            "Please import from langchain-community instead:\n\n"
            f"`from langchain_community.llms import {name}`.\n\n"
            "To install langchain-community run `pip install -U langchain-community`.",
            category=LangChainDeprecationWarning,
        )
    if name == "type_to_cls_dict":
        # for backwards compatibility
        type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
            k: v() for k, v in get_type_to_cls_dict().items()
        }
        return type_to_cls_dict
    else:
        return getattr(llms, name)
__all__ = [
    "AI21",
    "AlephAlpha",
    "AmazonAPIGateway",
    "Anthropic",
    "Anyscale",
    "Arcee",
    "Aviary",
    "AzureMLOnlineEndpoint",
    "AzureOpenAI",
    "Banana",
    "Baseten",
    "Beam",
    "Bedrock",
    "CTransformers",
    "CTranslate2",
    "CerebriumAI",
    "ChatGLM",
    "Clarifai",
    "Cohere",
    "Databricks",
    "DeepInfra",
    "DeepSparse",
    "EdenAI",
    "FakeListLLM",
    "Fireworks",
    "ForefrontAI",
    "GigaChat",
    "GPT4All",
    "GooglePalm",
    "GooseAI",
    "GradientLLM",
    "HuggingFaceEndpoint",
    "HuggingFaceHub",
    "HuggingFacePipeline",
    "HuggingFaceTextGenInference",
    "HumanInputLLM",
    "KoboldApiLLM",
    "LlamaCpp",
    "TextGen",
    "ManifestWrapper",
    "Minimax",
    "MlflowAIGateway",
    "Modal",
    "MosaicML",
    "Nebula",
    "NIBittensorLLM",
    "NLPCloud",
    "Ollama",
    "OpenAI",
    "OpenAIChat",
    "OpenLLM",
    "OpenLM",
    "PaiEasEndpoint",
    "Petals",
    "PipelineAI",
    "Predibase",
    "PredictionGuard",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "OpaquePrompts",
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
    "StochasticAI",
    "TitanTakeoff",
    "TitanTakeoffPro",
    "Tongyi",
    "VertexAI",
    "VertexAIModelGarden",
    "VLLM",
    "VLLMOpenAI",
    "WatsonxLLM",
    "Writer",
    "OctoAIEndpoint",
    "Xinference",
    "JavelinAIGateway",
    "QianfanLLMEndpoint",
    "YandexGPT",
    "VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
    return {
        "ai21": _import_ai21,
        "aleph_alpha": _import_aleph_alpha,
        "amazon_api_gateway": _import_amazon_api_gateway,
        "amazon_bedrock": _import_bedrock,
        "anthropic": _import_anthropic,
        "anyscale": _import_anyscale,
        "arcee": _import_arcee,
        "aviary": _import_aviary,
        "azure": _import_azure_openai,
        "azureml_endpoint": _import_azureml_endpoint,
        "bananadev": _import_bananadev,
        "baseten": _import_baseten,
        "beam": _import_beam,
        "cerebriumai": _import_cerebriumai,
        "chat_glm": _import_chatglm,
        "clarifai": _import_clarifai,
        "cohere": _import_cohere,
        "ctransformers": _import_ctransformers,
        "ctranslate2": _import_ctranslate2,
        "databricks": _import_databricks,
        "databricks-chat": _import_databricks_chat,
        "deepinfra": _import_deepinfra,
        "deepsparse": _import_deepsparse,
        "edenai": _import_edenai,
        "fake-list": _import_fake,
        "forefrontai": _import_forefrontai,
        "giga-chat-model": _import_gigachat,
        "google_palm": _import_google_palm,
        "gooseai": _import_gooseai,
        "gradient": _import_gradient_ai,
        "gpt4all": _import_gpt4all,
        "huggingface_endpoint": _import_huggingface_endpoint,
        "huggingface_hub": _import_huggingface_hub,
        "huggingface_pipeline": _import_huggingface_pipeline,
        "huggingface_textgen_inference": _import_huggingface_text_gen_inference,
        "human-input": _import_human,
        "koboldai": _import_koboldai,
        "llamacpp": _import_llamacpp,
        "textgen": _import_textgen,
        "minimax": _import_minimax,
        "mlflow": _import_mlflow,
        "mlflow-chat": _import_mlflow_chat,
        "mlflow-ai-gateway": _import_mlflow_ai_gateway,
        "modal": _import_modal,
        "mosaic": _import_mosaicml,
        "nebula": _import_symblai_nebula,
        "nibittensor": _import_bittensor,
        "nlpcloud": _import_nlpcloud,
        "ollama": _import_ollama,
        "openai": _import_openai,
        "openlm": _import_openlm,
        "pai_eas_endpoint": _import_pai_eas_endpoint,
        "petals": _import_petals,
        "pipelineai": _import_pipelineai,
        "predibase": _import_predibase,
        "opaqueprompts": _import_opaqueprompts,
        "replicate": _import_replicate,
        "rwkv": _import_rwkv,
        "sagemaker_endpoint": _import_sagemaker_endpoint,
        "self_hosted": _import_self_hosted,
        "self_hosted_hugging_face": _import_self_hosted_hugging_face,
        "stochasticai": _import_stochasticai,
        "together": _import_together,
        "tongyi": _import_tongyi,
        "titan_takeoff": _import_titan_takeoff,
        "titan_takeoff_pro": _import_titan_takeoff_pro,
        "vertexai": _import_vertex,
        "vertexai_model_garden": _import_vertex_model_garden,
        "openllm": _import_openllm,
        "openllm_client": _import_openllm,
        "vllm": _import_vllm,
        "vllm_openai": _import_vllm_openai,
        "watsonxllm": _import_watsonxllm,
        "writer": _import_writer,
        "xinference": _import_xinference,
        "javelin-ai-gateway": _import_javelin_ai_gateway,
        "qianfan_endpoint": _import_baidu_qianfan_endpoint,
        "yandex_gpt": _import_yandex_gpt,
        "VolcEngineMaasLLM": _import_volcengine_maas,
    }
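# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# get_type_to_cls_dict() maps provider keys to zero-argument import callables,
# so the langchain_community import only happens for providers that are
# actually requested. FakeListLLM is used here because it needs no API key.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fake_llm_cls = get_type_to_cls_dict()["fake-list"]()  # resolve the import lazily
    fake_llm = fake_llm_cls(responses=["Hello from the fake LLM."])
    print(fake_llm.invoke("Hi"))  # -> "Hello from the fake LLM."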
 | 
	[
  "langchain.utils.interactive_env.is_interactive_env"
] | 
	[((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n    f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n    , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')] | 
| 
	import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
    ENTITY_EXTRACTION_PROMPT,
    ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
    """Abstract base class for Entity store."""
    @abstractmethod
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        """Get entity value from store."""
        pass
    @abstractmethod
    def set(self, key: str, value: Optional[str]) -> None:
        """Set entity value in store."""
        pass
    @abstractmethod
    def delete(self, key: str) -> None:
        """Delete entity value from store."""
        pass
    @abstractmethod
    def exists(self, key: str) -> bool:
        """Check if entity exists in store."""
        pass
    @abstractmethod
    def clear(self) -> None:
        """Delete all entities from store."""
        pass
class InMemoryEntityStore(BaseEntityStore):
    """In-memory Entity store."""
    store: Dict[str, Optional[str]] = {}
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        return self.store.get(key, default)
    def set(self, key: str, value: Optional[str]) -> None:
        self.store[key] = value
    def delete(self, key: str) -> None:
        del self.store[key]
    def exists(self, key: str) -> bool:
        return key in self.store
    def clear(self) -> None:
        return self.store.clear()
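# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). Every
# entity store below implements the same get/set/delete/exists/clear protocol;
# the in-memory variant is a thin wrapper around a dict, so it is the easiest
# one to exercise. The helper name is hypothetical.
# ---------------------------------------------------------------------------
def _demo_in_memory_entity_store() -> None:
    store = InMemoryEntityStore()
    store.set("Alice", "Alice is a software engineer based in Berlin.")
    assert store.exists("Alice")
    assert store.get("Bob", default="No information yet.") == "No information yet."
    store.clear()
    assert not store.exists("Alice")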
class UpstashRedisEntityStore(BaseEntityStore):
    """Upstash Redis backed Entity store.
    Entities get a TTL of 1 day by default, and
    that TTL is reset to 3 days every time the entity is read back.
    """
    def __init__(
        self,
        session_id: str = "default",
        url: str = "",
        token: str = "",
        key_prefix: str = "memory_store",
        ttl: Optional[int] = 60 * 60 * 24,
        recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
        *args: Any,
        **kwargs: Any,
    ):
        try:
            from upstash_redis import Redis
        except ImportError:
            raise ImportError(
                "Could not import upstash_redis python package. "
                "Please install it with `pip install upstash_redis`."
            )
        super().__init__(*args, **kwargs)
        try:
            self.redis_client = Redis(url=url, token=token)
        except Exception:
            logger.error("Upstash Redis instance could not be initiated.")
        self.session_id = session_id
        self.key_prefix = key_prefix
        self.ttl = ttl
        self.recall_ttl = recall_ttl or ttl
    @property
    def full_key_prefix(self) -> str:
        return f"{self.key_prefix}:{self.session_id}"
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        res = (
            self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
            or default
            or ""
        )
        logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
        return res
    def set(self, key: str, value: Optional[str]) -> None:
        if not value:
            return self.delete(key)
        self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
        logger.debug(
            f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
        )
    def delete(self, key: str) -> None:
        self.redis_client.delete(f"{self.full_key_prefix}:{key}")
    def exists(self, key: str) -> bool:
        return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
    def clear(self) -> None:
        def scan_and_delete(cursor: int) -> int:
            cursor, keys_to_delete = self.redis_client.scan(
                cursor, f"{self.full_key_prefix}:*"
            )
            self.redis_client.delete(*keys_to_delete)
            return cursor
        cursor = scan_and_delete(0)
        while cursor != 0:
            scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
    """Redis-backed Entity store.
    Entities get a TTL of 1 day by default, and
    that TTL is reset to 3 days every time the entity is read back.
    """
    redis_client: Any
    session_id: str = "default"
    key_prefix: str = "memory_store"
    ttl: Optional[int] = 60 * 60 * 24
    recall_ttl: Optional[int] = 60 * 60 * 24 * 3
    def __init__(
        self,
        session_id: str = "default",
        url: str = "redis://localhost:6379/0",
        key_prefix: str = "memory_store",
        ttl: Optional[int] = 60 * 60 * 24,
        recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
        *args: Any,
        **kwargs: Any,
    ):
        try:
            import redis
        except ImportError:
            raise ImportError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )
        super().__init__(*args, **kwargs)
        try:
            self.redis_client = get_client(redis_url=url, decode_responses=True)
        except redis.exceptions.ConnectionError as error:
            logger.error(error)
        self.session_id = session_id
        self.key_prefix = key_prefix
        self.ttl = ttl
        self.recall_ttl = recall_ttl or ttl
    @property
    def full_key_prefix(self) -> str:
        return f"{self.key_prefix}:{self.session_id}"
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        res = (
            self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
            or default
            or ""
        )
        logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
        return res
    def set(self, key: str, value: Optional[str]) -> None:
        if not value:
            return self.delete(key)
        self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
        logger.debug(
            f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
        )
    def delete(self, key: str) -> None:
        self.redis_client.delete(f"{self.full_key_prefix}:{key}")
    def exists(self, key: str) -> bool:
        return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
    def clear(self) -> None:
        # iterate a list in batches of size batch_size
        def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
            iterator = iter(iterable)
            while batch := list(islice(iterator, batch_size)):
                yield batch
        for keybatch in batched(
            self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
        ):
            self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
    """SQLite-backed Entity store"""
    session_id: str = "default"
    table_name: str = "memory_store"
    conn: Any = None
    class Config:
        """Configuration for this pydantic object."""
        arbitrary_types_allowed = True
    def __init__(
        self,
        session_id: str = "default",
        db_file: str = "entities.db",
        table_name: str = "memory_store",
        *args: Any,
        **kwargs: Any,
    ):
        try:
            import sqlite3
        except ImportError:
            raise ImportError(
                "Could not import sqlite3 python package. "
                "Please install it with `pip install sqlite3`."
            )
        super().__init__(*args, **kwargs)
        self.conn = sqlite3.connect(db_file)
        self.session_id = session_id
        self.table_name = table_name
        self._create_table_if_not_exists()
    @property
    def full_table_name(self) -> str:
        return f"{self.table_name}_{self.session_id}"
    def _create_table_if_not_exists(self) -> None:
        create_table_query = f"""
            CREATE TABLE IF NOT EXISTS {self.full_table_name} (
                key TEXT PRIMARY KEY,
                value TEXT
            )
        """
        with self.conn:
            self.conn.execute(create_table_query)
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        query = f"""
            SELECT value
            FROM {self.full_table_name}
            WHERE key = ?
        """
        cursor = self.conn.execute(query, (key,))
        result = cursor.fetchone()
        if result is not None:
            value = result[0]
            return value
        return default
    def set(self, key: str, value: Optional[str]) -> None:
        if not value:
            return self.delete(key)
        query = f"""
            INSERT OR REPLACE INTO {self.full_table_name} (key, value)
            VALUES (?, ?)
        """
        with self.conn:
            self.conn.execute(query, (key, value))
    def delete(self, key: str) -> None:
        query = f"""
            DELETE FROM {self.full_table_name}
            WHERE key = ?
        """
        with self.conn:
            self.conn.execute(query, (key,))
    def exists(self, key: str) -> bool:
        query = f"""
            SELECT 1
            FROM {self.full_table_name}
            WHERE key = ?
            LIMIT 1
        """
        cursor = self.conn.execute(query, (key,))
        result = cursor.fetchone()
        return result is not None
    def clear(self) -> None:
        query = f"""
            DELETE FROM {self.full_table_name}
        """
        with self.conn:
            self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
    """Entity extractor & summarizer memory.
    Extracts named entities from the recent chat history and generates summaries.
    The entity store is swappable, allowing entities to persist across conversations.
    Defaults to an in-memory entity store, and can be swapped out for a Redis,
    SQLite, or other entity store.
    """
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: BaseLanguageModel
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
    # Cache of recently detected entity names, if any
    # It is updated when load_memory_variables is called:
    entity_cache: List[str] = []
    # Number of recent message pairs to consider when updating entities:
    k: int = 3
    chat_history_key: str = "history"
    # Store to manage entity-related data:
    entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
    @property
    def buffer(self) -> List[BaseMessage]:
        """Access chat memory messages."""
        return self.chat_memory.messages
    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.
        :meta private:
        """
        return ["entities", self.chat_history_key]
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """
        Returns chat history and all generated entities with summaries if available,
        and updates or clears the recent entity cache.
        New entity names can be found when calling this method, before the entity
        summaries are generated, so the entity cache values may be empty if no
        entity descriptions have been generated yet.
        """
        # Create an LLMChain for predicting entity names from the recent chat history:
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        if self.input_key is None:
            prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        # Extract an arbitrary window of the last message pairs from
        # the chat history, where the hyperparameter k is the
        # number of message pairs:
        buffer_string = get_buffer_string(
            self.buffer[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        # Generates a comma-separated list of named entities,
        # e.g. "Jane, White House, UFO"
        # or "NONE" if no named entities are extracted:
        output = chain.predict(
            history=buffer_string,
            input=inputs[prompt_input_key],
        )
        # If no named entities are extracted, assigns an empty list.
        if output.strip() == "NONE":
            entities = []
        else:
            # Make a list of the extracted entities:
            entities = [w.strip() for w in output.split(",")]
        # Make a dictionary of entities with summary if exists:
        entity_summaries = {}
        for entity in entities:
            entity_summaries[entity] = self.entity_store.get(entity, "")
        # Replaces the entity name cache with the most recently discussed entities,
        # or if no entities were extracted, clears the cache:
        self.entity_cache = entities
        # Should we return as message objects or as a string?
        if self.return_messages:
            # Get last `k` pair of chat messages:
            buffer: Any = self.buffer[-self.k * 2 :]
        else:
            # Reuse the string we made earlier:
            buffer = buffer_string
        return {
            self.chat_history_key: buffer,
            "entities": entity_summaries,
        }
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """
        Save context from this conversation history to the entity store.
        Generates a summary for each entity in the entity cache by prompting
        the model, and saves these summaries to the entity store.
        """
        super().save_context(inputs, outputs)
        if self.input_key is None:
            prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        # Extract an arbitrary window of the last message pairs from
        # the chat history, where the hyperparameter k is the
        # number of message pairs:
        buffer_string = get_buffer_string(
            self.buffer[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        input_data = inputs[prompt_input_key]
        # Create an LLMChain for predicting entity summarization from the context
        chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
        # Generate new summaries for entities and save them in the entity store
        for entity in self.entity_cache:
            # Get existing summary if it exists
            existing_summary = self.entity_store.get(entity, "")
            output = chain.predict(
                summary=existing_summary,
                entity=entity,
                history=buffer_string,
                input=input_data,
            )
            # Save the updated summary to the entity store
            self.entity_store.set(entity, output.strip())
    def clear(self) -> None:
        """Clear memory contents."""
        self.chat_memory.clear()
        self.entity_cache.clear()
        self.entity_store.clear()
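# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# ConversationEntityMemory needs a real LLM to extract and summarize
# entities, so the example stays commented out; ChatOpenAI is an assumption,
# any BaseLanguageModel would do, and the default entity store is in-memory.
# ---------------------------------------------------------------------------
# from langchain_community.chat_models import ChatOpenAI
#
# memory = ConversationEntityMemory(llm=ChatOpenAI(), k=3)
# inputs = {"input": "Deven and Sam are working on a hackathon project."}
# memory.load_memory_variables(inputs)          # populates memory.entity_cache
# memory.save_context(inputs, {"output": "That sounds like a great project!"})
# print(memory.entity_store.store)              # summaries keyed by entity name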
 | 
	[
  "langchain_community.utilities.redis.get_client",
  "langchain.chains.llm.LLMChain",
  "langchain.memory.utils.get_prompt_input_key",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.messages.get_buffer_string"
] | 
	[((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n    ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n    ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')] | 
| 
	from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMemory(BaseChatMemory):
    """Buffer for storing conversation memory."""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:
    @property
    def buffer(self) -> Any:
        """String buffer of memory."""
        return self.buffer_as_messages if self.return_messages else self.buffer_as_str
    async def abuffer(self) -> Any:
        """String buffer of memory."""
        return (
            await self.abuffer_as_messages()
            if self.return_messages
            else await self.abuffer_as_str()
        )
    def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
        return get_buffer_string(
            messages,
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
    @property
    def buffer_as_str(self) -> str:
        """Exposes the buffer as a string in case return_messages is True."""
        return self._buffer_as_str(self.chat_memory.messages)
    async def abuffer_as_str(self) -> str:
        """Exposes the buffer as a string in case return_messages is True."""
        messages = await self.chat_memory.aget_messages()
        return self._buffer_as_str(messages)
    @property
    def buffer_as_messages(self) -> List[BaseMessage]:
        """Exposes the buffer as a list of messages in case return_messages is False."""
        return self.chat_memory.messages
    async def abuffer_as_messages(self) -> List[BaseMessage]:
        """Exposes the buffer as a list of messages in case return_messages is False."""
        return await self.chat_memory.aget_messages()
    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.
        :meta private:
        """
        return [self.memory_key]
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}
    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return key-value pairs given the text input to the chain."""
        buffer = await self.abuffer()
        return {self.memory_key: buffer}
class ConversationStringBufferMemory(BaseMemory):
    """Buffer for storing conversation memory."""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    buffer: str = ""
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    memory_key: str = "history"  #: :meta private:
    @root_validator()
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that return messages is not True."""
        if values.get("return_messages", False):
            raise ValueError(
                "return_messages must be False for ConversationStringBufferMemory"
            )
        return values
    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.
        :meta private:
        """
        return [self.memory_key]
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}
    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        return self.load_memory_variables(inputs)
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        if self.input_key is None:
            prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        self.buffer += "\n" + "\n".join([human, ai])
    async def asave_context(
        self, inputs: Dict[str, Any], outputs: Dict[str, str]
    ) -> None:
        """Save context from this conversation to buffer."""
        return self.save_context(inputs, outputs)
    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""
    async def aclear(self) -> None:
        self.clear()
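# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module).
# save_context() appends one human/AI exchange to the underlying chat history;
# load_memory_variables() then returns it under the configured memory_key.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    memory = ConversationBufferMemory()
    memory.save_context({"input": "Hi there"}, {"output": "Hello! How can I help?"})
    print(memory.load_memory_variables({}))
    # -> {'history': 'Human: Hi there\nAI: Hello! How can I help?'}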
 | 
	[
  "langchain.memory.utils.get_prompt_input_key",
  "langchain_core.messages.get_buffer_string",
  "langchain_core.pydantic_v1.root_validator"
] | 
	[((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n    ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')] | 
| 
	"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
    AnyMessage,
    BaseMessage,
    HumanMessage,
    get_buffer_string,
)
class PromptValue(Serializable, ABC):
    """Base abstract class for inputs to any language model.
    PromptValues can be converted to both LLM (pure text-generation) inputs and
        ChatModel inputs.
    """
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "prompt"]
    @abstractmethod
    def to_string(self) -> str:
        """Return prompt value as string."""
    @abstractmethod
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
    """String prompt value."""
    text: str
    """Prompt text."""
    type: Literal["StringPromptValue"] = "StringPromptValue"
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "base"]
    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
    """Chat prompt value.
    A type of a prompt value that is built from messages.
    """
    messages: Sequence[BaseMessage]
    """List of messages."""
    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as a list of messages."""
        return list(self.messages)
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
    detail: Literal["auto", "low", "high"]
    """Specifies the detail level of the image."""
    url: str
    """Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
    """Image prompt value."""
    image_url: ImageURL
    """Prompt image."""
    type: Literal["ImagePromptValue"] = "ImagePromptValue"
    def to_string(self) -> str:
        """Return prompt as string."""
        return self.image_url["url"]
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
    """Chat prompt value which explicitly lists out the message types it accepts.
    For use in external schemas."""
    messages: Sequence[AnyMessage]
    type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]
 | 
	[
  "langchain_core.messages.HumanMessage",
  "langchain_core.messages.get_buffer_string"
] | 
	[((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'self.text'}), '(content=self.text)\n', (1812, 1831), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((3085, 3111), 'typing.cast', 'cast', (['dict', 'self.image_url'], {}), '(dict, self.image_url)\n', (3089, 3111), False, 'from typing import List, Literal, Sequence, cast\n')] | 
| 
	from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts
from langchain_core.utils.interactive_env import is_interactive_env
if TYPE_CHECKING:
    from langchain_core.prompts.chat import ChatPromptTemplate
class BaseMessage(Serializable):
    """Base abstract Message class.
    Messages are the inputs and outputs of ChatModels.
    """
    content: Union[str, List[Union[str, Dict]]]
    """The string contents of the message."""
    additional_kwargs: dict = Field(default_factory=dict)
    """Reserved for additional payload data associated with the message.
    
    For example, for a message from an AI, this could include tool calls."""
    response_metadata: dict = Field(default_factory=dict)
    """Response metadata. For example: response headers, logprobs, token counts."""
    type: str
    name: Optional[str] = None
    id: Optional[str] = None
    class Config:
        extra = Extra.allow
    def __init__(
        self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
    ) -> None:
        """Pass in content as positional arg."""
        return super().__init__(content=content, **kwargs)
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
    def __add__(self, other: Any) -> ChatPromptTemplate:
        from langchain_core.prompts.chat import ChatPromptTemplate
        prompt = ChatPromptTemplate(messages=[self])  # type: ignore[call-arg]
        return prompt + other
    def pretty_repr(self, html: bool = False) -> str:
        title = get_msg_title_repr(self.type.title() + " Message", bold=html)
        # TODO: handle non-string content.
        if self.name is not None:
            title += f"\nName: {self.name}"
        return f"{title}\n\n{self.content}"
    def pretty_print(self) -> None:
        print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201
def merge_content(
    first_content: Union[str, List[Union[str, Dict]]],
    second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
    """Merge two message contents.
    Args:
        first_content: The first content.
        second_content: The second content.
    Returns:
        The merged content.
    """
    # If first chunk is a string
    if isinstance(first_content, str):
        # If the second chunk is also a string, then merge them naively
        if isinstance(second_content, str):
            return first_content + second_content
        # If the second chunk is a list, add the first chunk to the start of the list
        else:
            return_list: List[Union[str, Dict]] = [first_content]
            return return_list + second_content
    # If both are lists, merge them naively
    elif isinstance(second_content, List):
        return first_content + second_content
    # If the first content is a list, and the second content is a string
    else:
        # If the last element of the first content is a string
        # Add the second content to the last element
        if isinstance(first_content[-1], str):
            return first_content[:-1] + [first_content[-1] + second_content]
        else:
            # Otherwise, add the second content as a new element of the list
            return first_content + [second_content]
class BaseMessageChunk(BaseMessage):
    """Message chunk, which can be concatenated with other Message chunks."""
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, BaseMessageChunk):
            # If both are (subclasses of) BaseMessageChunk,
            # concat into a single BaseMessageChunk
            return self.__class__(  # type: ignore[call-arg]
                id=self.id,
                content=merge_content(self.content, other.content),
                additional_kwargs=merge_dicts(
                    self.additional_kwargs, other.additional_kwargs
                ),
                response_metadata=merge_dicts(
                    self.response_metadata, other.response_metadata
                ),
            )
        else:
            raise TypeError(
                'unsupported operand type(s) for +: "'
                f"{self.__class__.__name__}"
                f'" and "{other.__class__.__name__}"'
            )
def message_to_dict(message: BaseMessage) -> dict:
    """Convert a Message to a dictionary.
    Args:
        message: Message to convert.
    Returns:
        Message as a dict.
    """
    return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
    """Convert a sequence of Messages to a list of dictionaries.
    Args:
        messages: Sequence of messages (as BaseMessages) to convert.
    Returns:
        List of messages as dicts.
    """
    return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
    """Get a title representation for a message.
    Args:
        title: The title.
        bold: Whether to bold the title.
    Returns:
        The title representation.
    """
    padded = " " + title + " "
    sep_len = (80 - len(padded)) // 2
    sep = "=" * sep_len
    second_sep = sep + "=" if len(padded) % 2 else sep
    if bold:
        padded = get_bolded_text(padded)
    return f"{sep}{padded}{second_sep}"
 | 
	[
  "langchain_core.utils.get_bolded_text",
  "langchain_core.utils.interactive_env.is_interactive_env",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.prompts.chat.ChatPromptTemplate",
  "langchain_core.utils._merge.merge_dicts"
] | 
	[((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (955, 977), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((1850, 1885), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (1868, 1885), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((5928, 5951), 'langchain_core.utils.get_bolded_text', 'get_bolded_text', (['padded'], {}), '(padded)\n', (5943, 5951), False, 'from langchain_core.utils import get_bolded_text\n'), ((2313, 2333), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2331, 2333), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((4467, 4527), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (4478, 4527), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((4601, 4661), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (4612, 4661), False, 'from langchain_core.utils._merge import merge_dicts\n')] | 
| 
	"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
LANGCHAINHUB_REPO = "https://raw.githubusercontent.com/hwchase17/langchain-hub/"
URL_BASE = os.environ.get(
    "LANGCHAIN_HUB_URL_BASE",
    LANGCHAINHUB_REPO + "{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
@deprecated(
    since="0.1.30",
    removal="0.2",
    message=(
        "Using the hwchase17/langchain-hub "
        "repo for prompts is deprecated. Please use "
        "https://smith.langchain.com/hub instead."
    ),
)
def try_load_from_hub(
    path: Union[str, Path],
    loader: Callable[[str], T],
    valid_prefix: str,
    valid_suffixes: Set[str],
    **kwargs: Any,
) -> Optional[T]:
    """Load configuration from hub.  Returns None if path is not a hub path."""
    if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
        return None
    ref, remote_path_str = match.groups()
    ref = ref[1:] if ref else DEFAULT_REF
    remote_path = Path(remote_path_str)
    if remote_path.parts[0] != valid_prefix:
        return None
    if remote_path.suffix[1:] not in valid_suffixes:
        raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
    # Using Path with URLs is not recommended, because on Windows
    # the backslash is used as the path separator, which can cause issues
    # when working with URLs that use forward slashes as the path separator.
    # Instead, use PurePosixPath to ensure that forward slashes are used as the
    # path separator, regardless of the operating system.
    full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
    if not full_url.startswith(LANGCHAINHUB_REPO):
        raise ValueError(f"Invalid hub path: {path}")
    r = requests.get(full_url, timeout=5)
    if r.status_code != 200:
        raise ValueError(f"Could not find file at {full_url}")
    with tempfile.TemporaryDirectory() as tmpdirname:
        file = Path(tmpdirname) / remote_path.name
        with open(file, "wb") as f:
            f.write(r.content)
        return loader(str(file), **kwargs)
 | 
	[
  "langchain_core._api.deprecation.deprecated"
] | 
	[((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGCHAIN_HUB_URL_BASE', LANGCHAINHUB_REPO + '{ref}/')\n", (490, 546), False, 'import os\n'), ((572, 619), 're.compile', 're.compile', (['"""lc(?P<ref>@[^:]+)?://(?P<path>.*)"""'], {}), "('lc(?P<ref>@[^:]+)?://(?P<path>.*)')\n", (582, 619), False, 'import re\n'), ((626, 638), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (633, 638), False, 'from typing import Any, Callable, Optional, Set, TypeVar, Union\n'), ((642, 822), 'langchain_core._api.deprecation.deprecated', 'deprecated', ([], {'since': '"""0.1.30"""', 'removal': '"""0.2"""', 'message': '"""Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead."""'}), "(since='0.1.30', removal='0.2', message=\n    'Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.'\n    )\n", (652, 822), False, 'from langchain_core._api.deprecation import deprecated\n'), ((1317, 1338), 'pathlib.Path', 'Path', (['remote_path_str'], {}), '(remote_path_str)\n', (1321, 1338), False, 'from pathlib import Path, PurePosixPath\n'), ((2099, 2132), 'requests.get', 'requests.get', (['full_url'], {'timeout': '(5)'}), '(full_url, timeout=5)\n', (2111, 2132), False, 'import requests\n'), ((2234, 2263), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2261, 2263), False, 'import tempfile\n'), ((2294, 2310), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (2298, 2310), False, 'from pathlib import Path, PurePosixPath\n'), ((1947, 1973), 'pathlib.PurePosixPath', 'PurePosixPath', (['remote_path'], {}), '(remote_path)\n', (1960, 1973), False, 'from pathlib import Path, PurePosixPath\n')] | 
| 
	"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.loading import try_load_from_hub
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
    config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
    config_type = config.pop("_type")
    if config_type not in AGENT_TO_CLASS:
        raise ValueError(f"Loading {config_type} agent not supported")
    agent_cls = AGENT_TO_CLASS[config_type]
    combined_config = {**config, **kwargs}
    return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="0.2.0")
def load_agent_from_config(
    config: dict,
    llm: Optional[BaseLanguageModel] = None,
    tools: Optional[List[Tool]] = None,
    **kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
    """Load agent from Config Dict.
    Args:
        config: Config dict to load agent from.
        llm: Language model to use as the agent.
        tools: List of tools this agent has access to.
        **kwargs: Additional keyword arguments passed to the agent executor.
    Returns:
        An agent executor.
    """
    if "_type" not in config:
        raise ValueError("Must specify an agent Type in config")
    load_from_tools = config.pop("load_from_llm_and_tools", False)
    if load_from_tools:
        if llm is None:
            raise ValueError(
                "If `load_from_llm_and_tools` is set to True, "
                "then LLM must be provided"
            )
        if tools is None:
            raise ValueError(
                "If `load_from_llm_and_tools` is set to True, "
                "then tools must be provided"
            )
        return _load_agent_from_tools(config, llm, tools, **kwargs)
    config_type = config.pop("_type")
    if config_type not in AGENT_TO_CLASS:
        raise ValueError(f"Loading {config_type} agent not supported")
    agent_cls = AGENT_TO_CLASS[config_type]
    if "llm_chain" in config:
        config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
    elif "llm_chain_path" in config:
        config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
    if "output_parser" in config:
        logger.warning(
            "Currently loading output parsers on agent is not supported, "
            "will just use the default one."
        )
        del config["output_parser"]
    combined_config = {**config, **kwargs}
    return agent_cls(**combined_config)  # type: ignore
@deprecated("0.1.0", removal="0.2.0")
def load_agent(
    path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
    """Unified method for loading an agent from LangChainHub or local fs.
    Args:
        path: Path to the agent file.
        **kwargs: Additional keyword arguments passed to the agent executor.
    Returns:
        An agent executor.
    """
    valid_suffixes = {"json", "yaml"}
    if hub_result := try_load_from_hub(
        path, _load_agent_from_file, "agents", valid_suffixes
    ):
        return hub_result
    else:
        return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
    file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
    """Load agent from file."""
    valid_suffixes = {"json", "yaml"}
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix[1:] == "json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix[1:] == "yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
    # Load the agent from the config now.
    return load_agent_from_config(config, **kwargs)
 | 
	[
  "langchain_core._api.deprecated",
  "langchain_core.utils.loading.try_load_from_hub"
] | 
	[((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api import deprecated\n'), ((3172, 3208), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (3182, 3208), False, 'from langchain_core._api import deprecated\n'), ((3632, 3704), 'langchain_core.utils.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_agent_from_file', '"""agents"""', 'valid_suffixes'], {}), "(path, _load_agent_from_file, 'agents', valid_suffixes)\n", (3649, 3704), False, 'from langchain_core.utils.loading import try_load_from_hub\n'), ((4092, 4102), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (4096, 4102), False, 'from pathlib import Path\n'), ((4270, 4282), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4279, 4282), False, 'import json\n'), ((4385, 4402), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4399, 4402), False, 'import yaml\n')] | 
| 
	"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.utils import get_colored_text
from langchain_core.utils.formatting import formatter
from langchain_core.utils.interactive_env import is_interactive_env
def jinja2_formatter(template: str, **kwargs: Any) -> str:
    """Format a template using jinja2.
    *Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
        SandboxedEnvironment by default. However, this sand-boxing should
        be treated as a best-effort approach rather than a guarantee of security.
        Do not accept jinja2 templates from untrusted sources as they may lead
        to arbitrary Python code execution.
        https://jinja.palletsprojects.com/en/3.1.x/sandbox/
    """
    try:
        from jinja2.sandbox import SandboxedEnvironment
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
            "Please be cautious when using jinja2 templates. "
            "Do not expand jinja2 templates using unverified or user-controlled "
            "inputs as that can result in arbitrary Python code execution."
        )
    # This uses a sandboxed environment to prevent arbitrary code execution.
    # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
    # Please treat this sand-boxing as a best-effort approach rather than
    # a guarantee of security.
    # We recommend to never use jinja2 templates with untrusted inputs.
    # https://jinja.palletsprojects.com/en/3.1.x/sandbox/
    # approach not a guarantee of security.
    return SandboxedEnvironment().from_string(template).render(**kwargs)
def validate_jinja2(template: str, input_variables: List[str]) -> None:
    """
    Validate that the input variables are valid for the template.
    Issues a warning if missing or extra variables are found.
    Args:
        template: The template string.
        input_variables: The input variables.
    """
    input_variables_set = set(input_variables)
    valid_variables = _get_jinja2_variables_from_template(template)
    missing_variables = valid_variables - input_variables_set
    extra_variables = input_variables_set - valid_variables
    warning_message = ""
    if missing_variables:
        warning_message += f"Missing variables: {missing_variables} "
    if extra_variables:
        warning_message += f"Extra variables: {extra_variables}"
    if warning_message:
        warnings.warn(warning_message.strip())
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
    try:
        from jinja2 import Environment, meta
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )
    env = Environment()
    ast = env.parse(template)
    variables = meta.find_undeclared_variables(ast)
    return variables
DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.format,
    "jinja2": jinja2_formatter,
}
DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.validate_input_variables,
    "jinja2": validate_jinja2,
}
def check_valid_template(
    template: str, template_format: str, input_variables: List[str]
) -> None:
    """Check that template string is valid.
    Args:
        template: The template string.
        template_format: The template format. Should be one of "f-string" or "jinja2".
        input_variables: The input variables.
    Raises:
        ValueError: If the template format is not supported.
    """
    try:
        validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
    except KeyError as exc:
        raise ValueError(
            f"Invalid template format {template_format!r}, should be one of"
            f" {list(DEFAULT_FORMATTER_MAPPING)}."
        ) from exc
    try:
        validator_func(template, input_variables)
    except (KeyError, IndexError) as exc:
        raise ValueError(
            "Invalid prompt schema; check for mismatched or missing input parameters"
            f" from {input_variables}."
        ) from exc
def get_template_variables(template: str, template_format: str) -> List[str]:
    """Get the variables from the template.
    Args:
        template: The template string.
        template_format: The template format. Should be one of "f-string" or "jinja2".
    Returns:
        The variables from the template.
    Raises:
        ValueError: If the template format is not supported.
    """
    if template_format == "jinja2":
        # Get the variables for the template
        input_variables = _get_jinja2_variables_from_template(template)
    elif template_format == "f-string":
        input_variables = {
            v for _, v, _, _ in Formatter().parse(template) if v is not None
        }
    else:
        raise ValueError(f"Unsupported template format: {template_format}")
    return sorted(input_variables)
class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt that exposes the format method, returning a prompt."""
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "base"]
    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))
    def pretty_repr(self, html: bool = False) -> str:
        # TODO: handle partials
        dummy_vars = {
            input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
        }
        if html:
            dummy_vars = {
                k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
            }
        return self.format(**dummy_vars)
    def pretty_print(self) -> None:
        print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201
 | 
	[
  "langchain_core.utils.get_colored_text",
  "langchain_core.utils.interactive_env.is_interactive_env"
] | 
	[((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((6074, 6103), 'langchain_core.utils.get_colored_text', 'get_colored_text', (['v', '"""yellow"""'], {}), "(v, 'yellow')\n", (6090, 6103), False, 'from langchain_core.utils import get_colored_text\n'), ((1946, 1968), 'jinja2.sandbox.SandboxedEnvironment', 'SandboxedEnvironment', ([], {}), '()\n', (1966, 1968), False, 'from jinja2.sandbox import SandboxedEnvironment\n'), ((6263, 6283), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (6281, 6283), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((5171, 5182), 'string.Formatter', 'Formatter', ([], {}), '()\n', (5180, 5182), False, 'from string import Formatter\n')] | 
| 
	"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
    ToolMetaclass --> BaseTool --> <name>Tool  # Examples: AIPluginTool, BaseGraphQLTool
                                   <name>      # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
    CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain.utils.interactive_env import is_interactive_env
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_python_tool_PythonAstREPLTool() -> Any:
    raise ImportError(
        "This tool has been moved to langchain experiment. "
        "This tool has access to a python REPL. "
        "For best practices make sure to sandbox this tool. "
        "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
        "To keep using this code as is, install langchain experimental and "
        "update relevant imports replacing 'langchain' with 'langchain_experimental'"
    )
def _import_python_tool_PythonREPLTool() -> Any:
    raise ImportError(
        "This tool has been moved to langchain experiment. "
        "This tool has access to a python REPL. "
        "For best practices make sure to sandbox this tool. "
        "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
        "To keep using this code as is, install langchain experimental and "
        "update relevant imports replacing 'langchain' with 'langchain_experimental'"
    )
def __getattr__(name: str) -> Any:
    if name == "PythonAstREPLTool":
        return _import_python_tool_PythonAstREPLTool()
    elif name == "PythonREPLTool":
        return _import_python_tool_PythonREPLTool()
    else:
        from langchain_community import tools
        # If not in interactive env, raise warning.
        if not is_interactive_env():
            warnings.warn(
                "Importing tools from langchain is deprecated. Importing from "
                "langchain will no longer be supported as of langchain==0.2.0. "
                "Please import from langchain-community instead:\n\n"
                f"`from langchain_community.tools import {name}`.\n\n"
                "To install langchain-community run "
                "`pip install -U langchain-community`.",
                category=LangChainDeprecationWarning,
            )
        return getattr(tools, name)
__all__ = [
    "AINAppOps",
    "AINOwnerOps",
    "AINRuleOps",
    "AINTransfer",
    "AINValueOps",
    "AIPluginTool",
    "APIOperation",
    "ArxivQueryRun",
    "AzureCogsFormRecognizerTool",
    "AzureCogsImageAnalysisTool",
    "AzureCogsSpeech2TextTool",
    "AzureCogsText2SpeechTool",
    "AzureCogsTextAnalyticsHealthTool",
    "BaseGraphQLTool",
    "BaseRequestsTool",
    "BaseSQLDatabaseTool",
    "BaseSparkSQLTool",
    "BaseTool",
    "BearlyInterpreterTool",
    "BingSearchResults",
    "BingSearchRun",
    "BraveSearch",
    "ClickTool",
    "CopyFileTool",
    "CurrentWebPageTool",
    "DeleteFileTool",
    "DuckDuckGoSearchResults",
    "DuckDuckGoSearchRun",
    "E2BDataAnalysisTool",
    "EdenAiExplicitImageTool",
    "EdenAiObjectDetectionTool",
    "EdenAiParsingIDTool",
    "EdenAiParsingInvoiceTool",
    "EdenAiSpeechToTextTool",
    "EdenAiTextModerationTool",
    "EdenAiTextToSpeechTool",
    "EdenaiTool",
    "ElevenLabsText2SpeechTool",
    "ExtractHyperlinksTool",
    "ExtractTextTool",
    "FileSearchTool",
    "GetElementsTool",
    "GmailCreateDraft",
    "GmailGetMessage",
    "GmailGetThread",
    "GmailSearch",
    "GmailSendMessage",
    "GoogleCloudTextToSpeechTool",
    "GooglePlacesTool",
    "GoogleSearchResults",
    "GoogleSearchRun",
    "GoogleSerperResults",
    "GoogleSerperRun",
    "SearchAPIResults",
    "SearchAPIRun",
    "HumanInputRun",
    "IFTTTWebhook",
    "InfoPowerBITool",
    "InfoSQLDatabaseTool",
    "InfoSparkSQLTool",
    "JiraAction",
    "JsonGetValueTool",
    "JsonListKeysTool",
    "ListDirectoryTool",
    "ListPowerBITool",
    "ListSQLDatabaseTool",
    "ListSparkSQLTool",
    "MerriamWebsterQueryRun",
    "MetaphorSearchResults",
    "MoveFileTool",
    "NasaAction",
    "NavigateBackTool",
    "NavigateTool",
    "O365CreateDraftMessage",
    "O365SearchEmails",
    "O365SearchEvents",
    "O365SendEvent",
    "O365SendMessage",
    "OpenAPISpec",
    "OpenWeatherMapQueryRun",
    "PubmedQueryRun",
    "RedditSearchRun",
    "QueryCheckerTool",
    "QueryPowerBITool",
    "QuerySQLCheckerTool",
    "QuerySQLDataBaseTool",
    "QuerySparkSQLTool",
    "ReadFileTool",
    "RequestsDeleteTool",
    "RequestsGetTool",
    "RequestsPatchTool",
    "RequestsPostTool",
    "RequestsPutTool",
    "SteamWebAPIQueryRun",
    "SceneXplainTool",
    "SearxSearchResults",
    "SearxSearchRun",
    "ShellTool",
    "SlackGetChannel",
    "SlackGetMessage",
    "SlackScheduleMessage",
    "SlackSendMessage",
    "SleepTool",
    "StdInInquireTool",
    "StackExchangeTool",
    "SteamshipImageGenerationTool",
    "StructuredTool",
    "Tool",
    "VectorStoreQATool",
    "VectorStoreQAWithSourcesTool",
    "WikipediaQueryRun",
    "WolframAlphaQueryRun",
    "WriteFileTool",
    "YahooFinanceNewsTool",
    "YouTubeSearchTool",
    "ZapierNLAListActions",
    "ZapierNLARunAction",
    "format_tool_to_openai_function",
    "tool",
]
 | 
	[
  "langchain.utils.interactive_env.is_interactive_env"
] | 
	[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n    f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n    , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')] | 
| 
	from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
    """Class for storing a piece of text and associated metadata."""
    page_content: str
    """String text."""
    metadata: dict = Field(default_factory=dict)
    """Arbitrary metadata about the page content (e.g., source, relationships to other
        documents, etc.).
    """
    type: Literal["Document"] = "Document"
    def __init__(self, page_content: str, **kwargs: Any) -> None:
        """Pass page_content in as positional or named arg."""
        super().__init__(page_content=page_content, **kwargs)
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "document"]
 | 
	[
  "langchain_core.pydantic_v1.Field"
] | 
	[((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] | 
| 
	"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import deprecated
from langchain_core.outputs import LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@deprecated("0.1.0", alternative="Use string instead.", removal="0.2.0")
def RunTypeEnum() -> Type[RunTypeEnumDep]:
    """RunTypeEnum."""
    warnings.warn(
        "RunTypeEnum is deprecated. Please directly use a string instead"
        " (e.g. 'llm', 'chain', 'tool').",
        DeprecationWarning,
    )
    return RunTypeEnumDep
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Base(BaseModel):
    """Base class for TracerSessionV1."""
    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    name: Optional[str] = None
    extra: Optional[Dict[str, Any]] = None
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Create(TracerSessionV1Base):
    """Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1(TracerSessionV1Base):
    """TracerSessionV1 schema."""
    id: int
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionBase(TracerSessionV1Base):
    """Base class for TracerSession."""
    tenant_id: UUID
@deprecated("0.1.0", removal="0.2.0")
class TracerSession(TracerSessionBase):
    """TracerSessionV1 schema for the V2 API."""
    id: UUID
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class BaseRun(BaseModel):
    """Base class for Run."""
    uuid: str
    parent_uuid: Optional[str] = None
    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    extra: Optional[Dict[str, Any]] = None
    execution_order: int
    child_execution_order: int
    serialized: Dict[str, Any]
    session_id: int
    error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class LLMRun(BaseRun):
    """Class for LLMRun."""
    prompts: List[str]
    response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ChainRun(BaseRun):
    """Class for ChainRun."""
    inputs: Dict[str, Any]
    outputs: Optional[Dict[str, Any]] = None
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ToolRun(BaseRun):
    """Class for ToolRun."""
    tool_input: str
    output: Optional[str] = None
    action: str
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
# Begin V2 API Schemas
class Run(BaseRunV2):
    """Run schema for the V2 API in the Tracer."""
    execution_order: int
    child_execution_order: int
    child_runs: List[Run] = Field(default_factory=list)
    tags: Optional[List[str]] = Field(default_factory=list)
    events: List[Dict[str, Any]] = Field(default_factory=list)
    trace_id: Optional[UUID] = None
    dotted_order: Optional[str] = None
    @root_validator(pre=True)
    def assign_name(cls, values: dict) -> dict:
        """Assign name to the run."""
        if values.get("name") is None:
            if "name" in values["serialized"]:
                values["name"] = values["serialized"]["name"]
            elif "id" in values["serialized"]:
                values["name"] = values["serialized"]["id"][-1]
        if values.get("events") is None:
            values["events"] = []
        return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
Run.update_forward_refs()
__all__ = [
    "BaseRun",
    "ChainRun",
    "LLMRun",
    "Run",
    "RunTypeEnum",
    "ToolRun",
    "TracerSession",
    "TracerSessionBase",
    "TracerSessionV1",
    "TracerSessionV1Base",
    "TracerSessionV1Create",
]
 | 
	[
  "langchain_core._api.deprecated",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.pydantic_v1.root_validator"
] | 
	[((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (791, 817), False, 'from langchain_core._api import deprecated\n'), ((1060, 1096), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1070, 1096), False, 'from langchain_core._api import deprecated\n'), ((1194, 1230), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1204, 1230), False, 'from langchain_core._api import deprecated\n'), ((1325, 1361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1335, 1361), False, 'from langchain_core._api import deprecated\n'), ((1472, 1508), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1482, 1508), False, 'from langchain_core._api import deprecated\n'), ((1615, 1670), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (1625, 1670), False, 'from langchain_core._api import deprecated\n'), ((2131, 2186), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2141, 2186), False, 'from langchain_core._api import deprecated\n'), ((2306, 2361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2316, 2361), False, 'from langchain_core._api import deprecated\n'), ((2688, 2743), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2698, 2743), False, 'from langchain_core._api import deprecated\n'), ((586, 727), 'warnings.warn', 'warnings.warn', (['"""RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."""', 'DeprecationWarning'], {}), '(\n    "RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."\n    , DeprecationWarning)\n', (599, 727), False, 'import warnings\n'), ((935, 982), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (940, 982), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1816, 1863), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1821, 1863), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1898, 1945), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1903, 1945), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2525, 2552), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2530, 2552), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2592, 2619), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2597, 2619), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2657, 2684), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2662, 2684), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2902, 2929), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2907, 2929), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2969, 2996), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2974, 2996), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3034, 3061), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3039, 3061), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3247, 3274), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3252, 3274), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3307, 3334), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3312, 3334), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3370, 3397), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3375, 3397), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3479, 3503), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3493, 3503), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n')] | 
| 
	"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
    """Load prompt from Config Dict."""
    if "_type" not in config:
        logger.warning("No `_type` key found, defaulting to `prompt`.")
    config_type = config.pop("_type", "prompt")
    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} prompt not supported")
    prompt_loader = type_to_loader_dict[config_type]
    return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
    """Load template from the path if applicable."""
    # Check if template_path exists in config.
    if f"{var_name}_path" in config:
        # If it does, make sure template variable doesn't also exist.
        if var_name in config:
            raise ValueError(
                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
            )
        # Pop the template path from the config.
        template_path = Path(config.pop(f"{var_name}_path"))
        # Load the template.
        if template_path.suffix == ".txt":
            with open(template_path) as f:
                template = f.read()
        else:
            raise ValueError
        # Set the template variable to the extracted variable.
        config[var_name] = template
    return config
def _load_examples(config: dict) -> dict:
    """Load examples if necessary."""
    if isinstance(config["examples"], list):
        pass
    elif isinstance(config["examples"], str):
        with open(config["examples"]) as f:
            if config["examples"].endswith(".json"):
                examples = json.load(f)
            elif config["examples"].endswith((".yaml", ".yml")):
                examples = yaml.safe_load(f)
            else:
                raise ValueError(
                    "Invalid file format. Only json or yaml formats are supported."
                )
        config["examples"] = examples
    else:
        raise ValueError("Invalid examples format. Only list or string are supported.")
    return config
def _load_output_parser(config: dict) -> dict:
    """Load output parser."""
    if "output_parser" in config and config["output_parser"]:
        _config = config.pop("output_parser")
        output_parser_type = _config.pop("_type")
        if output_parser_type == "default":
            output_parser = StrOutputParser(**_config)
        else:
            raise ValueError(f"Unsupported output parser {output_parser_type}")
        config["output_parser"] = output_parser
    return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
    """Load the "few shot" prompt from the config."""
    # Load the suffix and prefix templates.
    config = _load_template("suffix", config)
    config = _load_template("prefix", config)
    # Load the example prompt.
    if "example_prompt_path" in config:
        if "example_prompt" in config:
            raise ValueError(
                "Only one of example_prompt and example_prompt_path should "
                "be specified."
            )
        config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
    else:
        config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
    # Load the examples.
    config = _load_examples(config)
    config = _load_output_parser(config)
    return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
    """Load the prompt template from config."""
    # Load the template from disk if necessary.
    config = _load_template("template", config)
    config = _load_output_parser(config)
    template_format = config.get("template_format", "f-string")
    if template_format == "jinja2":
        # Disabled due to:
        # https://github.com/langchain-ai/langchain/issues/4394
        raise ValueError(
            f"Loading templates with '{template_format}' format is no longer supported "
            f"since it can lead to arbitrary code execution. Please migrate to using "
            f"the 'f-string' template format, which does not suffer from this issue."
        )
    return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
    """Unified method for loading a prompt from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
    ):
        return hub_result
    else:
        return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
    """Load prompt from file."""
    # Convert file to a Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix.endswith((".yaml", ".yml")):
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError(f"Got unsupported file type {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)
def _load_chat_prompt(config: Dict) -> ChatPromptTemplate:
    """Load chat prompt from config"""
    messages = config.pop("messages")
    template = messages[0]["prompt"].pop("template") if messages else None
    config.pop("input_variables")
    if not template:
        raise ValueError("Can't load chat prompt without template")
    return ChatPromptTemplate.from_template(template=template, **config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
    "prompt": _load_prompt,
    "few_shot": _load_few_shot_prompt,
    "chat": _load_chat_prompt,
}
 | 
	[
  "langchain_core.utils.try_load_from_hub",
  "langchain_core.prompts.chat.ChatPromptTemplate.from_template",
  "langchain_core.output_parsers.string.StrOutputParser",
  "langchain_core.prompts.few_shot.FewShotPromptTemplate",
  "langchain_core.prompts.prompt.PromptTemplate"
] | 
	[((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import FewShotPromptTemplate\n'), ((4733, 4757), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {}), '(**config)\n', (4747, 4757), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((6137, 6198), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template'}), '(template=template, **config)\n', (6169, 6198), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((4921, 5007), 'langchain_core.utils.try_load_from_hub', 'try_load_from_hub', (['path', '_load_prompt_from_file', '"""prompts"""', "{'py', 'json', 'yaml'}"], {}), "(path, _load_prompt_from_file, 'prompts', {'py', 'json',\n    'yaml'})\n", (4938, 5007), False, 'from langchain_core.utils import try_load_from_hub\n'), ((5295, 5305), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (5299, 5305), False, 'from pathlib import Path\n'), ((2964, 2990), 'langchain_core.output_parsers.string.StrOutputParser', 'StrOutputParser', ([], {}), '(**_config)\n', (2979, 2990), False, 'from langchain_core.output_parsers.string import StrOutputParser\n'), ((5470, 5482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5479, 5482), False, 'import json\n'), ((5599, 5616), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import yaml\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2329, 2346), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2343, 2346), False, 'import yaml\n')] | 
| 
	from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
    Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
from langchain.tools import Tool
class RetrieverInput(BaseModel):
    """Input to the retriever."""
    query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
    query: str,
    retriever: BaseRetriever,
    document_prompt: BasePromptTemplate,
    document_separator: str,
    callbacks: Callbacks = None,
) -> str:
    docs = retriever.get_relevant_documents(query, callbacks=callbacks)
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )
async def _aget_relevant_documents(
    query: str,
    retriever: BaseRetriever,
    document_prompt: BasePromptTemplate,
    document_separator: str,
    callbacks: Callbacks = None,
) -> str:
    docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )
def create_retriever_tool(
    retriever: BaseRetriever,
    name: str,
    description: str,
    *,
    document_prompt: Optional[BasePromptTemplate] = None,
    document_separator: str = "\n\n",
) -> Tool:
    """Create a tool to do retrieval of documents.
    Args:
        retriever: The retriever to use for the retrieval
        name: The name for the tool. This will be passed to the language model,
            so should be unique and somewhat descriptive.
        description: The description for the tool. This will be passed to the language
            model, so should be descriptive.
    Returns:
        Tool class to pass to an agent
    """
    document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
    func = partial(
        _get_relevant_documents,
        retriever=retriever,
        document_prompt=document_prompt,
        document_separator=document_separator,
    )
    afunc = partial(
        _aget_relevant_documents,
        retriever=retriever,
        document_prompt=document_prompt,
        document_separator=document_separator,
    )
    return Tool(
        name=name,
        description=description,
        func=func,
        coroutine=afunc,
        args_schema=RetrieverInput,
    )
 | 
	[
  "langchain_core.pydantic_v1.Field",
  "langchain_core.prompts.format_document",
  "langchain.tools.Tool",
  "langchain_core.prompts.PromptTemplate.from_template"
] | 
	[((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n    document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n    document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n    args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')] | 
| 
	from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description
@deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
class XMLAgent(BaseSingleActionAgent):
    """Agent that uses XML tags.
    Args:
        tools: list of tools the agent can choose from
        llm_chain: The LLMChain to call to predict the next action
    Examples:
        .. code-block:: python
            from langchain.agents import XMLAgent
            from langchain.chains.llm import LLMChain
            tools = ...
            model = ...
            chain = LLMChain(llm=model, prompt=XMLAgent.get_default_prompt(),
                             output_parser=XMLAgent.get_default_output_parser())
            agent = XMLAgent(tools=tools, llm_chain=chain)
    """
    tools: List[BaseTool]
    """List of tools this agent has access to."""
    llm_chain: LLMChain
    """Chain to use to predict action."""
    @property
    def input_keys(self) -> List[str]:
        return ["input"]
    @staticmethod
    def get_default_prompt() -> ChatPromptTemplate:
        base_prompt = ChatPromptTemplate.from_template(agent_instructions)
        return base_prompt + AIMessagePromptTemplate.from_template(
            "{intermediate_steps}"
        )
    @staticmethod
    def get_default_output_parser() -> XMLAgentOutputParser:
        return XMLAgentOutputParser()
    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = self.llm_chain(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]
    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = await self.llm_chain.acall(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]
def create_xml_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: BasePromptTemplate,
    tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
    """Create an agent that uses XML to format its logic.
    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use, must have input keys
            `tools`: contains descriptions for each tool.
            `agent_scratchpad`: contains previous agent actions and tool outputs.
        tools_renderer: This controls how the tools are converted into a string and
            then passed into the LLM. Default is `render_text_description`.
    Returns:
        A Runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.
    Example:
        .. code-block:: python
            from langchain import hub
            from langchain_community.chat_models import ChatAnthropic
            from langchain.agents import AgentExecutor, create_xml_agent
            prompt = hub.pull("hwchase17/xml-agent-convo")
            model = ChatAnthropic()
            tools = ...
            agent = create_xml_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"input": "hi"})
            # Use with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    # Notice that chat_history is a string
                    # since this prompt is aimed at LLMs, not chat models
                    "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
                }
            )
    Prompt:
        The prompt must have input keys:
            * `tools`: contains descriptions for each tool.
            * `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
        Here's an example:
        .. code-block:: python
            from langchain_core.prompts import PromptTemplate
            template = '''You are a helpful assistant. Help the user answer any questions.
            You have access to the following tools:
            {tools}
            In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
            For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
            <tool>search</tool><tool_input>weather in SF</tool_input>
            <observation>64 degrees</observation>
            When you are done, respond with a final answer between <final_answer></final_answer>. For example:
            <final_answer>The weather in SF is 64 degrees</final_answer>
            Begin!
            Previous Conversation:
            {chat_history}
            Question: {input}
            {agent_scratchpad}'''
            prompt = PromptTemplate.from_template(template)
    """  # noqa: E501
    missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables)
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")
    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
    )
    llm_with_stop = llm.bind(stop=["</tool_input>"])
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
        )
        | prompt
        | llm_with_stop
        | XMLAgentOutputParser()
    )
    return agent
 | 
	[
  "langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
  "langchain_core.prompts.chat.ChatPromptTemplate.from_template",
  "langchain.agents.output_parsers.XMLAgentOutputParser",
  "langchain.agents.format_scratchpad.format_xml",
  "langchain_core._api.deprecated"
] | 
	[((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')] | 
| 
	"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
    from langchain_community import graphs
    # If not in an interactive environment, emit a deprecation warning.
    if not is_interactive_env():
        warnings.warn(
            "Importing graphs from langchain is deprecated. Importing from "
            "langchain will no longer be supported as of langchain==0.2.0. "
            "Please import from langchain-community instead:\n\n"
            f"`from langchain_community.graphs import {name}`.\n\n"
            "To install langchain-community run `pip install -U langchain-community`.",
            category=LangChainDeprecationWarning,
        )
    return getattr(graphs, name)
__all__ = [
    "MemgraphGraph",
    "NetworkxEntityGraph",
    "Neo4jGraph",
    "NebulaGraph",
    "NeptuneGraph",
    "KuzuGraph",
    "HugeGraph",
    "RdfGraph",
    "ArangoGraph",
    "FalkorDBGraph",
]
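if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: calling the module
    # __getattr__ defined above mimics `from langchain.graphs import
    # NetworkxEntityGraph`; outside interactive sessions it emits the
    # deprecation warning before forwarding to langchain_community.graphs.
    print(__getattr__("NetworkxEntityGraph"))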
 | 
	[
  "langchain.utils.interactive_env.is_interactive_env"
] | 
	[((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n    f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n    , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')] | 
| 
	"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
    """Extract the scheme + domain from a given URL.
    Args:
        url (str): The input URL.
    Returns:
        return a 2-tuple of scheme and domain
    """
    parsed_uri = urlparse(url)
    return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
    """Check if a URL is in the allowed domains.
    Args:
        url (str): The input URL.
        limit_to_domains (Sequence[str]): The allowed domains.
    Returns:
        bool: True if the URL is in the allowed domains, False otherwise.
    """
    scheme, domain = _extract_scheme_and_domain(url)
    for allowed_domain in limit_to_domains:
        allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
        if scheme == allowed_scheme and domain == allowed_domain:
            return True
    return False
class APIChain(Chain):
    """Chain that makes API calls and summarizes the responses to answer a question.
    *Security Note*: This API chain uses the requests toolkit
        to make GET, POST, PATCH, PUT, and DELETE requests to an API.
        Exercise care in who is allowed to use this chain. If exposing
        to end users, consider that users will be able to make arbitrary
        requests on behalf of the server hosting the code. For example,
        users could ask the server to make a request to a private API
        that is only accessible from the server.
        Control access to who can submit requests using this toolkit and
        what network access it has.
        See https://python.langchain.com/docs/security for more information.
    """
    api_request_chain: LLMChain
    api_answer_chain: LLMChain
    requests_wrapper: TextRequestsWrapper = Field(exclude=True)
    api_docs: str
    question_key: str = "question"  #: :meta private:
    output_key: str = "output"  #: :meta private:
    limit_to_domains: Optional[Sequence[str]]
    """Use to limit the domains that can be accessed by the API chain.
    
    * For example, to limit to just the domain `https://www.example.com`, set
        `limit_to_domains=["https://www.example.com"]`.
        
    * The default value is an empty tuple, which means that no domains are
      allowed by default. By design this will raise an error on instantiation.
    * Use None if you want to allow all domains by default -- this is not
      recommended for security reasons, as it would allow malicious users to
      make requests to arbitrary URLs including internal APIs accessible from
      the server.
    """
    @property
    def input_keys(self) -> List[str]:
        """Expect input key.
        :meta private:
        """
        return [self.question_key]
    @property
    def output_keys(self) -> List[str]:
        """Expect output key.
        :meta private:
        """
        return [self.output_key]
    @root_validator(pre=True)
    def validate_api_request_prompt(cls, values: Dict) -> Dict:
        """Check that api request prompt expects the right variables."""
        input_vars = values["api_request_chain"].prompt.input_variables
        expected_vars = {"question", "api_docs"}
        if set(input_vars) != expected_vars:
            raise ValueError(
                f"Input variables should be {expected_vars}, got {input_vars}"
            )
        return values
    @root_validator(pre=True)
    def validate_limit_to_domains(cls, values: Dict) -> Dict:
        """Check that allowed domains are valid."""
        if "limit_to_domains" not in values:
            raise ValueError(
                "You must specify a list of domains to limit access using "
                "`limit_to_domains`"
            )
        if not values["limit_to_domains"] and values["limit_to_domains"] is not None:
            raise ValueError(
                "Please provide a list of domains to limit access using "
                "`limit_to_domains`."
            )
        return values
    @root_validator(pre=True)
    def validate_api_answer_prompt(cls, values: Dict) -> Dict:
        """Check that api answer prompt expects the right variables."""
        input_vars = values["api_answer_chain"].prompt.input_variables
        expected_vars = {"question", "api_docs", "api_url", "api_response"}
        if set(input_vars) != expected_vars:
            raise ValueError(
                f"Input variables should be {expected_vars}, got {input_vars}"
            )
        return values
    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.question_key]
        api_url = self.api_request_chain.predict(
            question=question,
            api_docs=self.api_docs,
            callbacks=_run_manager.get_child(),
        )
        _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
        api_url = api_url.strip()
        if self.limit_to_domains and not _check_in_allowed_domain(
            api_url, self.limit_to_domains
        ):
            raise ValueError(
                f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
            )
        api_response = self.requests_wrapper.get(api_url)
        _run_manager.on_text(
            str(api_response), color="yellow", end="\n", verbose=self.verbose
        )
        answer = self.api_answer_chain.predict(
            question=question,
            api_docs=self.api_docs,
            api_url=api_url,
            api_response=api_response,
            callbacks=_run_manager.get_child(),
        )
        return {self.output_key: answer}
    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.question_key]
        api_url = await self.api_request_chain.apredict(
            question=question,
            api_docs=self.api_docs,
            callbacks=_run_manager.get_child(),
        )
        await _run_manager.on_text(
            api_url, color="green", end="\n", verbose=self.verbose
        )
        api_url = api_url.strip()
        if self.limit_to_domains and not _check_in_allowed_domain(
            api_url, self.limit_to_domains
        ):
            raise ValueError(
                f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
            )
        api_response = await self.requests_wrapper.aget(api_url)
        await _run_manager.on_text(
            str(api_response), color="yellow", end="\n", verbose=self.verbose
        )
        answer = await self.api_answer_chain.apredict(
            question=question,
            api_docs=self.api_docs,
            api_url=api_url,
            api_response=api_response,
            callbacks=_run_manager.get_child(),
        )
        return {self.output_key: answer}
    @classmethod
    def from_llm_and_api_docs(
        cls,
        llm: BaseLanguageModel,
        api_docs: str,
        headers: Optional[dict] = None,
        api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
        api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
        limit_to_domains: Optional[Sequence[str]] = tuple(),
        **kwargs: Any,
    ) -> APIChain:
        """Load chain from just an LLM and the api docs."""
        get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
        requests_wrapper = TextRequestsWrapper(headers=headers)
        get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
        return cls(
            api_request_chain=get_request_chain,
            api_answer_chain=get_answer_chain,
            requests_wrapper=requests_wrapper,
            api_docs=api_docs,
            limit_to_domains=limit_to_domains,
            **kwargs,
        )
    @property
    def _chain_type(self) -> str:
        return "api_chain"
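if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The chat model, API
    # docs, and question are illustrative assumptions; running it needs valid
    # OpenAI credentials and outbound network access.
    from langchain_community.chat_models import ChatOpenAI

    open_meteo_docs = """
    BASE URL: https://api.open-meteo.com/
    The /v1/forecast endpoint accepts `latitude`, `longitude`, and
    `current_weather=true` query parameters and returns a JSON forecast.
    """
    chain = APIChain.from_llm_and_api_docs(
        llm=ChatOpenAI(temperature=0),
        api_docs=open_meteo_docs,
        # Restrict generated URLs to the documented host (see `limit_to_domains`).
        limit_to_domains=["https://api.open-meteo.com"],
    )
    print(chain.run("What is the current temperature in Berlin?"))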
 | 
	[
  "langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
  "langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
  "langchain.chains.llm.LLMChain",
  "langchain_community.utilities.requests.TextRequestsWrapper",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.pydantic_v1.root_validator"
] | 
	[((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')] | 
| 
	"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
    BaseChatPromptTemplate,
    BaseMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    check_valid_template,
    get_template_variables,
)
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
    """Prompt template that contains few shot examples."""
    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""
    example_selector: Any = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""
    class Config:
        """Configuration for this pydantic object."""
        extra = Extra.forbid
        arbitrary_types_allowed = True
    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )
        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
        return values
    def _get_examples(self, **kwargs: Any) -> List[dict]:
        """Get the examples to use for formatting the prompt.
        Args:
            **kwargs: Keyword arguments to be passed to the example selector.
        Returns:
            List of examples.
        """
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
    """Prompt template that contains few shot examples."""
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether or not the class is serializable."""
        return False
    validate_template: bool = False
    """Whether or not to try validating the template."""
    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""
    suffix: str
    """A prompt template string to put after the examples."""
    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""
    prefix: str = ""
    """A prompt template string to put before the examples."""
    template_format: Literal["f-string", "jinja2"] = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""
    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix, and input variables are consistent."""
        if values["validate_template"]:
            check_valid_template(
                values["prefix"] + values["suffix"],
                values["template_format"],
                values["input_variables"] + list(values["partial_variables"]),
            )
        elif values.get("template_format"):
            values["input_variables"] = [
                var
                for var in get_template_variables(
                    values["prefix"] + values["suffix"], values["template_format"]
                )
                if var not in values["partial_variables"]
            ]
        return values
    class Config:
        """Configuration for this pydantic object."""
        extra = Extra.forbid
        arbitrary_types_allowed = True
    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.
        Args:
            **kwargs: Any arguments to be passed to the prompt template.
        Returns:
            A formatted string.
        Example:
        .. code-block:: python
            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        examples = [
            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
        ]
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall template.
        pieces = [self.prefix, *example_strings, self.suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot"
    def save(self, file_path: Union[Path, str]) -> None:
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().save(file_path)
class FewShotChatMessagePromptTemplate(
    BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
    """Chat prompt template that supports few-shot examples.
    The high-level structure produced by this prompt template is a list of messages
    consisting of prefix message(s), example message(s), and suffix message(s).
    This structure enables creating a conversation with intermediate examples like:
        System: You are a helpful AI Assistant
        Human: What is 2+2?
        AI: 4
        Human: What is 2+3?
        AI: 5
        Human: What is 4+4?
    This prompt template can be used to generate a fixed list of examples or else
    to dynamically select examples based on the input.
    Examples:
        Prompt template with a fixed list of examples (matching the sample
        conversation above):
        .. code-block:: python
            from langchain_core.prompts import (
                FewShotChatMessagePromptTemplate,
                ChatPromptTemplate
            )
            examples = [
                {"input": "2+2", "output": "4"},
                {"input": "2+3", "output": "5"},
            ]
            example_prompt = ChatPromptTemplate.from_messages(
                [('human', '{input}'), ('ai', '{output}')]
            )
            few_shot_prompt = FewShotChatMessagePromptTemplate(
                examples=examples,
                # This is a prompt template used to format each individual example.
                example_prompt=example_prompt,
            )
            final_prompt = ChatPromptTemplate.from_messages(
                [
                    ('system', 'You are a helpful AI Assistant'),
                    few_shot_prompt,
                    ('human', '{input}'),
                ]
            )
            final_prompt.format(input="What is 4+4?")
        Prompt template with dynamically selected examples:
        .. code-block:: python
            from langchain_core.prompts import SemanticSimilarityExampleSelector
            from langchain_community.embeddings import OpenAIEmbeddings
            from langchain_community.vectorstores import Chroma
            examples = [
                {"input": "2+2", "output": "4"},
                {"input": "2+3", "output": "5"},
                {"input": "2+4", "output": "6"},
                # ...
            ]
            to_vectorize = [
                " ".join(example.values())
                for example in examples
            ]
            embeddings = OpenAIEmbeddings()
            vectorstore = Chroma.from_texts(
                to_vectorize, embeddings, metadatas=examples
            )
            example_selector = SemanticSimilarityExampleSelector(
                vectorstore=vectorstore
            )
            from langchain_core.prompts import (
                AIMessagePromptTemplate,
                HumanMessagePromptTemplate,
                SystemMessagePromptTemplate,
            )
            from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
            few_shot_prompt = FewShotChatMessagePromptTemplate(
                # Which variable(s) will be passed to the example selector.
                input_variables=["input"],
                example_selector=example_selector,
                # Define how each example will be formatted.
                # In this case, each example will become 2 messages:
                # 1 human, and 1 AI
                example_prompt=(
                    HumanMessagePromptTemplate.from_template("{input}")
                    + AIMessagePromptTemplate.from_template("{output}")
                ),
            )
            # Define the overall prompt.
            final_prompt = (
                SystemMessagePromptTemplate.from_template(
                    "You are a helpful AI Assistant"
                )
                + few_shot_prompt
                + HumanMessagePromptTemplate.from_template("{input}")
            )
            # Show the prompt
            print(final_prompt.format_messages(input="What's 3+3?"))  # noqa: T201
            # Use within an LLM
            from langchain_community.chat_models import ChatAnthropic
            chain = final_prompt | ChatAnthropic()
            chain.invoke({"input": "What's 3+3?"})
    """
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether or not the class is serializable."""
        return False
    input_variables: List[str] = Field(default_factory=list)
    """A list of the names of the variables the prompt template will use
    to pass to the example_selector, if provided."""
    example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
    """The class to format each example."""
    class Config:
        """Configuration for this pydantic object."""
        extra = Extra.forbid
        arbitrary_types_allowed = True
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format kwargs into a list of messages.
        Args:
            **kwargs: keyword arguments to use for filling in templates in messages.
        Returns:
            A list of formatted messages with all template variables filled in.
        """
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        examples = [
            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
        ]
        # Format the examples.
        messages = [
            message
            for example in examples
            for message in self.example_prompt.format_messages(**example)
        ]
        return messages
    def format(self, **kwargs: Any) -> str:
        """Format the prompt with inputs generating a string.
        Use this method to generate a string representation of a prompt consisting
        of chat messages.
        Useful for feeding into a string based completion language model or debugging.
        Args:
            **kwargs: keyword arguments to use for formatting.
        Returns:
            A string representation of the prompt
        """
        messages = self.format_messages(**kwargs)
        return get_buffer_string(messages)
    def pretty_repr(self, html: bool = False) -> str:
        raise NotImplementedError()
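if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: a minimal
    # FewShotPromptTemplate built from a fixed example list. The antonym
    # examples and variable names are illustrative only.
    example_prompt = PromptTemplate.from_template("Input: {input}\nOutput: {output}")
    few_shot = FewShotPromptTemplate(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "tall", "output": "short"},
        ],
        example_prompt=example_prompt,
        prefix="Give the antonym of every input.",
        suffix="Input: {adjective}\nOutput:",
        input_variables=["adjective"],
    )
    print(few_shot.format(adjective="big"))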
 | 
	[
  "langchain_core.pydantic_v1.Field",
  "langchain_core.messages.get_buffer_string",
  "langchain_core.pydantic_v1.root_validator",
  "langchain_core.prompts.string.get_template_variables"
] | 
	[((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (3482, 3484), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((10187, 10214), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10192, 10214), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((11875, 11902), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (11892, 11902), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((4012, 4103), 'langchain_core.prompts.string.get_template_variables', 'get_template_variables', (["(values['prefix'] + values['suffix'])", "values['template_format']"], {}), "(values['prefix'] + values['suffix'], values[\n    'template_format'])\n", (4034, 4103), False, 'from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, get_template_variables\n')] | 
| 
	"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
    """Generate a hypothetical document for the query, and then embed it.
    Based on https://arxiv.org/abs/2212.10496
    """
    base_embeddings: Embeddings
    llm_chain: LLMChain
    class Config:
        """Configuration for this pydantic object."""
        extra = Extra.forbid
        arbitrary_types_allowed = True
    @property
    def input_keys(self) -> List[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_keys
    @property
    def output_keys(self) -> List[str]:
        """Output keys for Hyde's LLM chain."""
        return self.llm_chain.output_keys
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)
    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))
    def embed_query(self, text: str) -> List[float]:
        """Generate a hypothetical document and embed it."""
        var_name = self.llm_chain.input_keys[0]
        result = self.llm_chain.generate([{var_name: text}])
        documents = [generation.text for generation in result.generations[0]]
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)
    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Call the internal llm chain."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        return self.llm_chain(inputs, callbacks=_run_manager.get_child())
    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        base_embeddings: Embeddings,
        prompt_key: Optional[str] = None,
        custom_prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> HypotheticalDocumentEmbedder:
        """Load and use LLMChain with either a specific prompt key or custom prompt."""
        if custom_prompt is not None:
            prompt = custom_prompt
        elif prompt_key is not None and prompt_key in PROMPT_MAP:
            prompt = PROMPT_MAP[prompt_key]
        else:
            raise ValueError(
                f"Must specify prompt_key if custom_prompt not provided. Should be one "
                f"of {list(PROMPT_MAP.keys())}."
            )
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
    @property
    def _chain_type(self) -> str:
        return "hyde_chain"
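if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The LLM and base
    # embeddings are illustrative assumptions and require OpenAI credentials.
    from langchain_community.embeddings import OpenAIEmbeddings
    from langchain_community.llms import OpenAI

    hyde = HypotheticalDocumentEmbedder.from_llm(
        llm=OpenAI(temperature=0),
        base_embeddings=OpenAIEmbeddings(),
        prompt_key="web_search",  # one of the keys defined in PROMPT_MAP
    )
    query_vector = hyde.embed_query("Where is the Eiffel Tower located?")
    print(len(query_vector))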
 | 
	[
  "langchain.chains.hyde.prompts.PROMPT_MAP.keys",
  "langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
  "langchain.chains.llm.LLMChain"
] | 
	[((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')] | 
| 
	"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.chains import LLMChain
from langchain.tools.render import render_text_description
class ChainConfig(NamedTuple):
    """Configuration for chain to use in MRKL system.
    Args:
        action_name: Name of the action.
        action: Action function to call.
        action_description: Description of the action.
    """
    action_name: str
    action: Callable
    action_description: str
@deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0")
class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""
    output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return MRKLOutputParser()
    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.ZERO_SHOT_REACT_DESCRIPTION
    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "
    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"
    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.
        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            input_variables: List of input variables the final prompt will expect.
        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = render_text_description(list(tools))
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables:
            return PromptTemplate(template=template, input_variables=input_variables)
        return PromptTemplate.from_template(template)
    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        validate_tools_single_input(cls.__name__, tools)
        if len(tools) == 0:
            raise ValueError(
                f"Got no tools for {cls.__name__}. At least one tool must be provided."
            )
        for tool in tools:
            if tool.description is None:
                raise ValueError(
                    f"Got a tool {tool.name} without a description. For this agent, "
                    f"a description must always be provided."
                )
        super()._validate_tools(tools)
@deprecated("0.1.0", removal="0.2.0")
class MRKLChain(AgentExecutor):
    """[Deprecated] Chain that implements the MRKL system."""
    @classmethod
    def from_chains(
        cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
    ) -> AgentExecutor:
        """User friendly way to initialize the MRKL chain.
        This is intended to be an easy way to get up and running with the
        MRKL chain.
        Args:
            llm: The LLM to use as the agent LLM.
            chains: The chains the MRKL system has access to.
            **kwargs: parameters to be passed to initialization.
        Returns:
            An initialized MRKL chain.
        """
        tools = [
            Tool(
                name=c.action_name,
                func=c.action,
                description=c.action_description,
            )
            for c in chains
        ]
        agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
        return cls(agent=agent, tools=tools, **kwargs)
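if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. ZeroShotAgent is
    # deprecated in favor of create_react_agent, so this only shows the legacy
    # API; the LLM and the toy tool are illustrative and need OpenAI credentials.
    from langchain_community.llms import OpenAI

    tools = [
        Tool(
            name="word_length",
            func=lambda word: str(len(word.strip())),
            description="Returns the number of characters in a word.",
        )
    ]
    agent = ZeroShotAgent.from_llm_and_tools(OpenAI(temperature=0), tools)
    executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    print(executor.invoke({"input": "How many characters are in 'hello'?"}))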
 | 
	[
  "langchain.agents.mrkl.output_parser.MRKLOutputParser",
  "langchain.agents.utils.validate_tools_single_input",
  "langchain_core.pydantic_v1.Field",
  "langchain_core.prompts.PromptTemplate",
  "langchain_core._api.deprecated",
  "langchain.chains.LLMChain",
  "langchain.agents.tools.Tool",
  "langchain_core.prompts.PromptTemplate.from_template"
] | 
	[((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')] | 
| 
	import base64
import io
import os
import uuid
from io import BytesIO
from pathlib import Path
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import LocalFileStore
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from PIL import Image
def image_summarize(img_base64, prompt):
    """
    Make image summary
    :param img_base64: Base64 encoded string for image
    :param prompt: Text prompt for summarization
    :return: Image summary text
    """
    chat = ChatOllama(model="bakllava", temperature=0)
    msg = chat.invoke(
        [
            HumanMessage(
                content=[
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{img_base64}",
                    },
                ]
            )
        ]
    )
    return msg.content
def generate_img_summaries(img_base64_list):
    """
    Generate summaries for images
    :param img_base64_list: Base64 encoded images
    :return: List of image summaries and processed images
    """
    # Store image summaries
    image_summaries = []
    processed_images = []
    # Prompt
    prompt = """Give a detailed summary of the image."""
    # Apply summarization to images
    for i, base64_image in enumerate(img_base64_list):
        try:
            image_summaries.append(image_summarize(base64_image, prompt))
            processed_images.append(base64_image)
        except Exception as e:
            print(f"Error with image {i+1}: {e}")  # noqa: T201
    return image_summaries, processed_images
def get_images(img_path):
    """
    Extract images.
    :param img_path: A string representing the path to the images.
    """
    # Get image URIs
    pil_images = [
        Image.open(os.path.join(img_path, image_name))
        for image_name in os.listdir(img_path)
        if image_name.endswith(".jpg")
    ]
    return pil_images
def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string
    :param base64_string: Base64 string
    :param size: Image size
    :return: Re-sized Base64 string
    """
    # Decode the Base64 string
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))
    # Resize the image
    resized_img = img.resize(size, Image.LANCZOS)
    # Save the resized image to a bytes buffer
    buffered = io.BytesIO()
    resized_img.save(buffered, format=img.format)
    # Encode the resized image to Base64
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
def convert_to_base64(pil_image):
    """
    Convert PIL images to Base64 encoded strings
    :param pil_image: PIL image
    :return: Re-sized Base64 string
    """
    buffered = BytesIO()
    pil_image.save(buffered, format="JPEG")  # You can change the format if needed
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    # img_str = resize_base64_image(img_str, size=(831,623))
    return img_str
def create_multi_vector_retriever(vectorstore, image_summaries, images):
    """
    Create a retriever that indexes summaries, but returns raw images or texts
    :param vectorstore: Vectorstore to store embedded image summaries
    :param image_summaries: Image summaries
    :param images: Base64 encoded images
    :return: Retriever
    """
    # Initialize the storage layer for images
    store = LocalFileStore(
        str(Path(__file__).parent / "multi_vector_retriever_metadata")
    )
    id_key = "doc_id"
    # Create the multi-vector retriever
    retriever = MultiVectorRetriever(
        vectorstore=vectorstore,
        byte_store=store,
        id_key=id_key,
    )
    # Helper function to add documents to the vectorstore and docstore
    def add_documents(retriever, doc_summaries, doc_contents):
        doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
        summary_docs = [
            Document(page_content=s, metadata={id_key: doc_ids[i]})
            for i, s in enumerate(doc_summaries)
        ]
        retriever.vectorstore.add_documents(summary_docs)
        retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
    add_documents(retriever, image_summaries, images)
    return retriever
# Load images
doc_path = Path(__file__).parent / "docs/"
rel_doc_path = doc_path.relative_to(Path.cwd())
print("Read images")  # noqa: T201
pil_images = get_images(rel_doc_path)
# Convert to b64
images_base_64 = [convert_to_base64(i) for i in pil_images]
# Image summaries
print("Generate image summaries")  # noqa: T201
image_summaries, images_base_64_processed = generate_img_summaries(images_base_64)
# The vectorstore to use to index the images summaries
vectorstore_mvr = Chroma(
    collection_name="image_summaries",
    persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"),
    embedding_function=OllamaEmbeddings(model="llama2:7b"),
)
# Create documents
images_base_64_processed_documents = [
    Document(page_content=i) for i in images_base_64_processed
]
# Create retriever
retriever_multi_vector_img = create_multi_vector_retriever(
    vectorstore_mvr,
    image_summaries,
    images_base_64_processed_documents,
)
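# Hedged usage sketch, not part of the original template: the query string is an
# illustrative assumption; the retriever returns the raw base64-encoded image
# documents whose summaries are most similar to the query text.
if __name__ == "__main__":
    matches = retriever_multi_vector_img.get_relevant_documents("a photo of a person")
    print(f"Retrieved {len(matches)} image documents")  # noqa: T201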
 | 
	[
  "langchain_core.documents.Document",
  "langchain_community.embeddings.OllamaEmbeddings",
  "langchain_community.chat_models.ChatOllama",
  "langchain_core.messages.HumanMessage",
  "langchain.retrievers.multi_vector.MultiVectorRetriever"
] | 
	[((731, 774), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""bakllava"""', 'temperature': '(0)'}), "(model='bakllava', temperature=0)\n", (741, 774), False, 'from langchain_community.chat_models import ChatOllama\n'), ((2494, 2525), 'base64.b64decode', 'base64.b64decode', (['base64_string'], {}), '(base64_string)\n', (2510, 2525), False, 'import base64\n'), ((2706, 2718), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2716, 2718), False, 'import io\n'), ((3062, 3071), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3069, 3071), False, 'from io import BytesIO\n'), ((3881, 3959), 'langchain.retrievers.multi_vector.MultiVectorRetriever', 'MultiVectorRetriever', ([], {'vectorstore': 'vectorstore', 'byte_store': 'store', 'id_key': 'id_key'}), '(vectorstore=vectorstore, byte_store=store, id_key=id_key)\n', (3901, 3959), False, 'from langchain.retrievers.multi_vector import MultiVectorRetriever\n'), ((4634, 4644), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4642, 4644), False, 'from pathlib import Path\n'), ((5269, 5293), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'i'}), '(page_content=i)\n', (5277, 5293), False, 'from langchain_core.documents import Document\n'), ((2547, 2567), 'io.BytesIO', 'io.BytesIO', (['img_data'], {}), '(img_data)\n', (2557, 2567), False, 'import io\n'), ((4566, 4580), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4570, 4580), False, 'from pathlib import Path\n'), ((5167, 5202), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'model': '"""llama2:7b"""'}), "(model='llama2:7b')\n", (5183, 5202), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((821, 957), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': "[{'type': 'text', 'text': prompt}, {'type': 'image_url', 'image_url':\n    f'data:image/jpeg;base64,{img_base64}'}]"}), "(content=[{'type': 'text', 'text': prompt}, {'type':\n    'image_url', 'image_url': f'data:image/jpeg;base64,{img_base64}'}])\n", (833, 957), False, 'from langchain_core.messages import HumanMessage\n'), ((2071, 2105), 'os.path.join', 'os.path.join', (['img_path', 'image_name'], {}), '(img_path, image_name)\n', (2083, 2105), False, 'import os\n'), ((2133, 2153), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (2143, 2153), False, 'import os\n'), ((4223, 4278), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 's', 'metadata': '{id_key: doc_ids[i]}'}), '(page_content=s, metadata={id_key: doc_ids[i]})\n', (4231, 4278), False, 'from langchain_core.documents import Document\n'), ((4149, 4161), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4159, 4161), False, 'import uuid\n'), ((3737, 3751), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3741, 3751), False, 'from pathlib import Path\n'), ((5094, 5108), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5098, 5108), False, 'from pathlib import Path\n')] | 
| 
	from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import asyncio
from langchain.prompts import PromptTemplate
from server.utils import get_prompt_template
async def completion(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                     stream: bool = Body(False, description="流式输出"),
                     echo: bool = Body(False, description="除了输出之外,还回显输入"),
                     model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
                     max_tokens: Optional[int] = Body(1024, description="限制LLM生成Token数量,默认None代表模型最大值"),
                     # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
                     prompt_name: str = Body("default",
                                             description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
                     ):
    #todo 因ApiModelWorker 默认是按chat处理的,会对params["prompt"] 解析为messages,因此ApiModelWorker 使用时需要有相应处理
    async def completion_iterator(query: str,
                                  model_name: str = LLM_MODELS[0],
                                  prompt_name: str = prompt_name,
                                  echo: bool = echo,
                                  ) -> AsyncIterable[str]:
        nonlocal max_tokens
        callback = AsyncIteratorCallbackHandler()
        if isinstance(max_tokens, int) and max_tokens <= 0:
            max_tokens = None
        model = get_OpenAI(
            model_name=model_name,
            temperature=temperature,
            max_tokens=max_tokens,
            callbacks=[callback],
            echo=echo
        )
        prompt_template = get_prompt_template("completion", prompt_name)
        prompt = PromptTemplate.from_template(prompt_template)
        chain = LLMChain(prompt=prompt, llm=model)
        # Begin a task that runs in the background.
        task = asyncio.create_task(wrap_done(
            chain.acall({"input": query}),
            callback.done),
        )
        if stream:
            async for token in callback.aiter():
                # Use server-sent-events to stream the response
                yield token
        else:
            answer = ""
            async for token in callback.aiter():
                answer += token
            yield answer
        await task
    return EventSourceResponse(completion_iterator(query=query,
                                                 model_name=model_name,
                                                 prompt_name=prompt_name),
                             )
 | 
	[
  "langchain.chains.LLMChain",
  "langchain.prompts.PromptTemplate.from_template",
  "langchain.callbacks.AsyncIteratorCallbackHandler"
] | 
	[((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", (540, 567), False, 'from fastapi import Body\n'), ((603, 642), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""除了输出之外,还回显输入"""'}), "(False, description='除了输出之外,还回显输入')\n", (607, 642), False, 'from fastapi import Body\n'), ((683, 727), 'fastapi.Body', 'Body', (['LLM_MODELS[0]'], {'description': '"""LLM 模型名称。"""'}), "(LLM_MODELS[0], description='LLM 模型名称。')\n", (687, 727), False, 'from fastapi import Body\n'), ((771, 828), 'fastapi.Body', 'Body', (['TEMPERATURE'], {'description': '"""LLM 采样温度"""', 'ge': '(0.0)', 'le': '(1.0)'}), "(TEMPERATURE, description='LLM 采样温度', ge=0.0, le=1.0)\n", (775, 828), False, 'from fastapi import Body\n'), ((879, 933), 'fastapi.Body', 'Body', (['(1024)'], {'description': '"""限制LLM生成Token数量,默认None代表模型最大值"""'}), "(1024, description='限制LLM生成Token数量,默认None代表模型最大值')\n", (883, 933), False, 'from fastapi import Body\n'), ((1083, 1157), 'fastapi.Body', 'Body', (['"""default"""'], {'description': '"""使用的prompt模板名称(在configs/prompt_config.py中配置)"""'}), "('default', description='使用的prompt模板名称(在configs/prompt_config.py中配置)')\n", (1087, 1157), False, 'from fastapi import Body\n'), ((1664, 1694), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (1692, 1694), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((1802, 1921), 'server.utils.get_OpenAI', 'get_OpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'callbacks': '[callback]', 'echo': 'echo'}), '(model_name=model_name, temperature=temperature, max_tokens=\n    max_tokens, callbacks=[callback], echo=echo)\n', (1812, 1921), False, 'from server.utils import wrap_done, get_OpenAI\n'), ((2014, 2060), 'server.utils.get_prompt_template', 'get_prompt_template', (['"""completion"""', 'prompt_name'], {}), "('completion', prompt_name)\n", (2033, 2060), False, 'from server.utils import get_prompt_template\n'), ((2078, 2123), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2106, 2123), False, 'from langchain.prompts import PromptTemplate\n'), ((2140, 2174), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'model'}), '(prompt=prompt, llm=model)\n', (2148, 2174), False, 'from langchain.chains import LLMChain\n')] | 
| 
	from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
 | 
	[
  "langchain.llms.Ollama"
] | 
	[((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] | 
| 
	import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
    TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter
from llama_index import SimpleDirectoryReader
from llama_index.node_parser.interface import TextSplitter
from llama_index.schema import Document
from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter
from streamlit.runtime.uploaded_file_manager import UploadedFile
DEFAULT_TEXT = "The quick brown fox jumps over the lazy dog."
text = st.sidebar.text_area("Enter text", value=DEFAULT_TEXT)
uploaded_files = st.sidebar.file_uploader("Upload file", accept_multiple_files=True)
type = st.sidebar.radio("Document Type", options=["Text", "Code"])
n_cols = st.sidebar.number_input("Columns", value=2, min_value=1, max_value=3)
assert isinstance(n_cols, int)
@st.cache_resource(ttl=3600)
def load_document(uploaded_files: List[UploadedFile]) -> List[Document]:
    # Read documents
    temp_dir = tempfile.TemporaryDirectory()
    for file in uploaded_files:
        temp_filepath = os.path.join(temp_dir.name, file.name)
        with open(temp_filepath, "wb") as f:
            f.write(file.getvalue())
    reader = SimpleDirectoryReader(input_dir=temp_dir.name)
    return reader.load_data()
if uploaded_files:
    if text != DEFAULT_TEXT:
        st.warning("Text will be ignored when uploading files")
    docs = load_document(uploaded_files)
    text = "\n".join([doc.text for doc in docs])
chunk_size = st.slider(
    "Chunk Size",
    value=512,
    min_value=1,
    max_value=4096,
)
chunk_overlap = st.slider(
    "Chunk Overlap",
    value=0,
    min_value=0,
    max_value=4096,
)
cols = st.columns(n_cols)
for ind, col in enumerate(cols):
    if type == "Text":
        text_splitter_cls = col.selectbox(
            "Text Splitter",
            options=[
                "TokenTextSplitter",
                "SentenceSplitter",
                "LC:RecursiveCharacterTextSplitter",
                "LC:CharacterTextSplitter",
                "LC:TokenTextSplitter",
            ],
            index=ind,
            key=f"splitter_cls_{ind}",
        )
        text_splitter: Union[TextSplitter, LCSplitter]
        if text_splitter_cls == "TokenTextSplitter":
            text_splitter = TokenTextSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "SentenceSplitter":
            text_splitter = SentenceSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:RecursiveCharacterTextSplitter":
            text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:CharacterTextSplitter":
            text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        elif text_splitter_cls == "LC:TokenTextSplitter":
            text_splitter = LCTokenTextSplitter(
                chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )
        else:
            raise ValueError("Unknown text splitter")
    elif type == "Code":
        text_splitter_cls = col.selectbox("Text Splitter", options=["CodeSplitter"])
        if text_splitter_cls == "CodeSplitter":
            language = col.text_input("Language", value="python")
            max_chars = col.slider("Max Chars", value=1500)
            text_splitter = CodeSplitter(language=language, max_chars=max_chars)
        else:
            raise ValueError("Unknown text splitter")
    chunks = text_splitter.split_text(text)
    tokenizer = tiktoken.get_encoding("gpt2").encode
    for chunk_ind, chunk in enumerate(chunks):
        n_tokens = len(tokenizer(chunk))
        n_chars = len(chunk)
        col.text_area(
            f"Chunk {chunk_ind} - {n_tokens} tokens - {n_chars} chars",
            chunk,
            key=f"text_area_{ind}_{chunk_ind}",
            height=500,
        )
 | 
	[
  "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
  "langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
  "langchain.text_splitter.TokenTextSplitter"
] | 
	[((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_multiple_files': '(True)'}), "('Upload file', accept_multiple_files=True)\n", (814, 857), True, 'import streamlit as st\n'), ((865, 924), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Document Type"""'], {'options': "['Text', 'Code']"}), "('Document Type', options=['Text', 'Code'])\n", (881, 924), True, 'import streamlit as st\n'), ((934, 1003), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Columns"""'], {'value': '(2)', 'min_value': '(1)', 'max_value': '(3)'}), "('Columns', value=2, min_value=1, max_value=3)\n", (957, 1003), True, 'import streamlit as st\n'), ((1038, 1065), 'streamlit.cache_resource', 'st.cache_resource', ([], {'ttl': '(3600)'}), '(ttl=3600)\n', (1055, 1065), True, 'import streamlit as st\n'), ((1692, 1755), 'streamlit.slider', 'st.slider', (['"""Chunk Size"""'], {'value': '(512)', 'min_value': '(1)', 'max_value': '(4096)'}), "('Chunk Size', value=512, min_value=1, max_value=4096)\n", (1701, 1755), True, 'import streamlit as st\n'), ((1791, 1855), 'streamlit.slider', 'st.slider', (['"""Chunk Overlap"""'], {'value': '(0)', 'min_value': '(0)', 'max_value': '(4096)'}), "('Chunk Overlap', value=0, min_value=0, max_value=4096)\n", (1800, 1855), True, 'import streamlit as st\n'), ((1883, 1901), 'streamlit.columns', 'st.columns', (['n_cols'], {}), '(n_cols)\n', (1893, 1901), True, 'import streamlit as st\n'), ((1175, 1204), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1202, 1204), False, 'import tempfile\n'), ((1396, 1442), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'temp_dir.name'}), '(input_dir=temp_dir.name)\n', (1417, 1442), False, 'from llama_index import SimpleDirectoryReader\n'), ((1261, 1299), 'os.path.join', 'os.path.join', (['temp_dir.name', 'file.name'], {}), '(temp_dir.name, file.name)\n', (1273, 1299), False, 'import os\n'), ((1531, 1586), 'streamlit.warning', 'st.warning', (['"""Text will be ignored when uploading files"""'], {}), "('Text will be ignored when uploading files')\n", (1541, 1586), True, 'import streamlit as st\n'), ((3968, 3997), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (3989, 3997), False, 'import tiktoken\n'), ((2486, 2555), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2503, 2555), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2668, 2736), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2684, 2736), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((3786, 3838), 'llama_index.text_splitter.CodeSplitter', 'CodeSplitter', ([], {'language': 'language', 'max_chars': 'max_chars'}), '(language=language, max_chars=max_chars)\n', (3798, 3838), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2866, 2974), 
'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n    chunk_overlap=chunk_overlap)\n', (2918, 2974), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3091, 3190), 'langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder', 'CharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n    chunk_overlap=chunk_overlap)\n', (3134, 3190), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3303, 3374), 'langchain.text_splitter.TokenTextSplitter', 'LCTokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3322, 3374), True, 'from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter\n')] | 
| 
	import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
    # Remove the triple backticks if present
    json_string = json_string.strip()
    start_index = json_string.find("```json")
    end_index = json_string.find("```", start_index + len("```json"))
    if start_index != -1 and end_index != -1:
        extracted_content = json_string[start_index + len("```json"):end_index].strip()
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(extracted_content)
    elif start_index != -1 and end_index == -1 and json_string.endswith("``"):
        end_index = json_string.find("``", start_index + len("```json"))
        extracted_content = json_string[start_index + len("```json"):end_index].strip()
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(extracted_content)
    elif json_string.startswith("{"):
        # Parse the JSON string into a Python dictionary
        parsed = json.loads(json_string)
    else:
        raise Exception("Could not find JSON block in the output.")
    return parsed
def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
    try:
        json_obj = parse_json_markdown(text)
    except json.JSONDecodeError as e:
        raise OutputParserException(f"Got invalid JSON object. Error: {e}")
    for key in expected_keys:
        if key not in json_obj:
            raise OutputParserException(
                f"Got invalid return object. Expected key `{key}` "
                f"to be present, but got {json_obj}"
            )
    return json_obj
 | 
	[
  "langchain.schema.OutputParserException"
] | 
	[((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid JSON object. Error: {e}"""'], {}), "(f'Got invalid JSON object. Error: {e}')\n", (1343, 1383), False, 'from langchain.schema import OutputParserException\n'), ((1464, 1581), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"""'], {}), "(\n    f'Got invalid return object. Expected key `{key}` to be present, but got {json_obj}'\n    )\n", (1485, 1581), False, 'from langchain.schema import OutputParserException\n'), ((1013, 1036), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1023, 1036), False, 'import json\n')] | 
| 
	# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size
    def split_text1(self, text: str) -> List[str]:
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub('\s', ' ', text)
            text = text.replace("\n\n", "")
        sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
        sent_list = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list
    def split_text(self, text: str) -> List[str]:   ##此处需要进一步优化逻辑
        if self.pdf:
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub('\s', " ", text)
            text = re.sub("\n\n", "", text)
        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # 单字符断句符
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # 英文省略号
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # 中文省略号
        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        # 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号
        text = text.rstrip()  # 段尾如果有多余的\n就去掉它
        # 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。
        ls = [i for i in text.split("\n") if i]
        for ele in ls:
            if len(ele) > self.sentence_size:
                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
                ele1_ls = ele1.split("\n")
                for ele_ele1 in ele1_ls:
                    if len(ele_ele1) > self.sentence_size:
                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
                        ele2_ls = ele_ele2.split("\n")
                        for ele_ele2 in ele2_ls:
                            if len(ele_ele2) > self.sentence_size:
                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
                                ele2_id = ele2_ls.index(ele_ele2)
                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
                                                                                                       ele2_id + 1:]
                        ele_id = ele1_ls.index(ele_ele1)
                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
                id = ls.index(ele)
                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
        return ls
def load_file(filepath, sentence_size):
    loader = UnstructuredFileLoader(filepath, mode="elements")
    textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
    docs = loader.load_and_split(text_splitter=textsplitter)
    # write_check_file(filepath, docs)
    return docs
 | 
	[
  "langchain.document_loaders.UnstructuredFileLoader"
] | 
	[((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))"""'], {}), '(\'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))\')\n', (667, 714), False, 'import re\n'), ((1218, 1269), 're.sub', 're.sub', (['"""([;;.!?。!?\\\\?])([^”’])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), "('([;;.!?。!?\\\\?])([^”’])', '\\\\1\\\\n\\\\2', text)\n", (1224, 1269), False, 'import re\n'), ((1293, 1340), 're.sub', 're.sub', (['"""(\\\\.{6})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\.{6})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1299, 1340), False, 'import re\n'), ((1363, 1410), 're.sub', 're.sub', (['"""(\\\\…{2})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\…{2})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1369, 1410), False, 'import re\n'), ((1433, 1504), 're.sub', 're.sub', (['"""([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])\', \'\\\\1\\\\n\\\\2\', text)\n', (1439, 1504), False, 'import re\n'), ((513, 542), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\n"""', 'text'], {}), "('\\\\n{3,}', '\\n', text)\n", (519, 542), False, 'import re\n'), ((562, 586), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (568, 586), False, 'import re\n'), ((1084, 1114), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\\\\n"""', 'text'], {}), "('\\\\n{3,}', '\\\\n', text)\n", (1090, 1114), False, 'import re\n'), ((1134, 1158), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (1140, 1158), False, 'import re\n'), ((1177, 1201), 're.sub', 're.sub', (['"""\n\n"""', '""""""', 'text'], {}), "('\\n\\n', '', text)\n", (1183, 1201), False, 'import re\n'), ((1816, 1871), 're.sub', 're.sub', (['"""([,,.]["’”」』]{0,2})([^,,.])"""', '"""\\\\1\\\\n\\\\2"""', 'ele'], {}), '(\'([,,.]["’”」』]{0,2})([^,,.])\', \'\\\\1\\\\n\\\\2\', ele)\n', (1822, 1871), False, 'import re\n'), ((2049, 2119), 're.sub', 're.sub', (['"""([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele1'], {}), '(\'([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])\', \'\\\\1\\\\n\\\\2\', ele_ele1)\n', (2055, 2119), False, 'import re\n'), ((2331, 2385), 're.sub', 're.sub', (['"""( ["’”」』]{0,2})([^ ])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele2'], {}), '(\'( ["’”」』]{0,2})([^ ])\', \'\\\\1\\\\n\\\\2\', ele_ele2)\n', (2337, 2385), False, 'import re\n')] | 
| 
	import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
class Agent(BaseModel):
    """Represents an agent in a system.
    Each agent has a role, a goal, a backstory, and an optional language model (llm).
    The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
    Attributes:
            agent_executor: An instance of the CrewAgentExecutor class.
            role: The role of the agent.
            goal: The objective of the agent.
            backstory: The backstory of the agent.
            config: Dict representation of agent configuration.
            llm: The language model that will run the agent.
            function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
            max_iter: Maximum number of iterations for an agent to execute a task.
            memory: Whether the agent should have memory or not.
            max_rpm: Maximum number of requests per minute for the agent execution to be respected.
            verbose: Whether the agent execution should be in verbose mode.
            allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
            tools: Tools at the agent's disposal.
            step_callback: Callback to be executed after each step of the agent execution.
            callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """
    __hash__ = object.__hash__  # type: ignore
    _logger: Logger = PrivateAttr()
    _rpm_controller: RPMController = PrivateAttr(default=None)
    _request_within_rpm_limit: Any = PrivateAttr(default=None)
    _token_process: TokenProcess = TokenProcess()
    formatting_errors: int = 0
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    config: Optional[Dict[str, Any]] = Field(
        description="Configuration for the agent",
        default=None,
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    memory: bool = Field(
        default=False, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
        default=15, description="Maximum iterations for an agent to execute a task"
    )
    agent_executor: InstanceOf[CrewAgentExecutor] = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    tools_handler: InstanceOf[ToolsHandler] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=CacheHandler(), description="An instance of the CacheHandler class."
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4")
        ),
        description="Language model that will run the agent.",
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None, description="Callback to be executed"
    )
    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)
    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )
    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Agent":
        """Set attributes based on the agent configuration."""
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self
    @model_validator(mode="after")
    def set_private_attrs(self):
        """Set private attributes."""
        self._logger = Logger(self.verbose)
        if self.max_rpm and not self._rpm_controller:
            self._rpm_controller = RPMController(
                max_rpm=self.max_rpm, logger=self._logger
            )
        return self
    @model_validator(mode="after")
    def set_agent_executor(self) -> "Agent":
        """set agent executor is set."""
        if hasattr(self.llm, "model_name"):
            self.llm.callbacks = [
                TokenCalcHandler(self.llm.model_name, self._token_process)
            ]
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self
    def execute_task(
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.
        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.
        Returns:
            Output of the agent
        """
        self.tools_handler.last_used_tool = {}
        task_prompt = task.prompt()
        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context
            )
        tools = self._parse_tools(tools or self.tools)
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = tools
        self.agent_executor.task = task
        self.agent_executor.tools_description = render_text_description(tools)
        self.agent_executor.tools_names = self.__tools_names(tools)
        result = self.agent_executor.invoke(
            {
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
            }
        )["output"]
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        return result
    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        """Set the cache handler for the agent.
        Args:
            cache_handler: An instance of the CacheHandler class.
        """
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self.create_agent_executor()
    def set_rpm_controller(self, rpm_controller: RPMController) -> None:
        """Set the rpm controller for the agent.
        Args:
            rpm_controller: An instance of the RPMController class.
        """
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self.create_agent_executor()
    def create_agent_executor(self, tools=None) -> None:
        """Create an agent executor for the agent.
        Returns:
            An instance of the CrewAgentExecutor class.
        """
        tools = tools or self.tools
        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: self.format_log_to_str(
                x["intermediate_steps"]
            ),
        }
        executor_args = {
            "llm": self.llm,
            "i18n": self.i18n,
            "tools": self._parse_tools(tools),
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "step_callback": self.step_callback,
            "tools_handler": self.tools_handler,
            "function_calling_llm": self.function_calling_llm,
            "callbacks": self.callbacks,
        }
        if self._rpm_controller:
            executor_args[
                "request_within_rpm_limit"
            ] = self._rpm_controller.check_or_wait
        if self.memory:
            summary_memory = ConversationSummaryMemory(
                llm=self.llm, input_key="input", memory_key="chat_history"
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory()
        else:
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )
        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
        inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args
        )
    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the agent description and backstory."""
        if inputs:
            self.role = self.role.format(**inputs)
            self.goal = self.goal.format(**inputs)
            self.backstory = self.backstory.format(**inputs)
    def increment_formatting_errors(self) -> None:
        """Count the formatting errors of the agent."""
        self.formatting_errors += 1
    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts
    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
        """Parse tools to be used for the task."""
        # Tentatively import BaseTool from crewai_tools; fall back to using the tools as-is if the package is unavailable
        tools_list = []
        try:
            from crewai_tools import BaseTool as CrewAITool
            for tool in tools:
                if isinstance(tool, CrewAITool):
                    tools_list.append(tool.to_langchain())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            for tool in tools:
                tools_list.append(tool)
        return tools_list
    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])
    def __repr__(self):
        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
 | 
	[
  "langchain.memory.ConversationSummaryMemory",
  "langchain.agents.agent.RunnableAgent",
  "langchain.tools.render.render_text_description"
] | 
	[((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2454, 2468), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2506, 2531), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2517, 2531), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2567, 2581), 'crewai.utilities.token_counter_callback.TokenProcess', 'TokenProcess', ([], {}), '()\n', (2579, 2581), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((2633, 2673), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (2643, 2673), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2690, 2807), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'frozen': '(True)', 'description': '"""Unique identifier for the object, not set by user."""'}), "(default_factory=uuid.uuid4, frozen=True, description=\n    'Unique identifier for the object, not set by user.')\n", (2695, 2807), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2850, 2888), 'pydantic.Field', 'Field', ([], {'description': '"""Role of the agent"""'}), "(description='Role of the agent')\n", (2855, 2888), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2905, 2948), 'pydantic.Field', 'Field', ([], {'description': '"""Objective of the agent"""'}), "(description='Objective of the agent')\n", (2910, 2948), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2970, 3013), 'pydantic.Field', 'Field', ([], {'description': '"""Backstory of the agent"""'}), "(description='Backstory of the agent')\n", (2975, 3013), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3053, 3115), 'pydantic.Field', 'Field', ([], {'description': '"""Configuration for the agent"""', 'default': 'None'}), "(description='Configuration for the agent', default=None)\n", (3058, 3115), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3168, 3291), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Maximum number of requests per minute for the agent execution to be respected."""'}), "(default=None, description=\n    'Maximum number of requests per minute for the agent execution to be respected.'\n    )\n", (3173, 3291), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3324, 3403), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the agent should have memory or not"""'}), "(default=False, description='Whether the agent should have memory or not')\n", (3329, 3403), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3438, 3510), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Verbose mode for the Agent Execution"""'}), "(default=False, description='Verbose mode for the Agent Execution')\n", (3443, 3510), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3554, 3624), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Allow delegation of tasks to agents"""'}), "(default=True, description='Allow delegation of tasks to agents')\n", (3559, 3624), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3672, 3739), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Tools at agents disposal"""'}), "(default_factory=list, description='Tools at agents disposal')\n", (3677, 3739), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3784, 3871), 'pydantic.Field', 'Field', ([], {'default': '(15)', 'description': '"""Maximum iterations for an agent to execute a task"""'}), "(default=15, description=\n    'Maximum iterations for an agent to execute a task')\n", (3789, 3871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3933, 4011), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the CrewAgentExecutor class."""'}), "(default=None, description='An instance of the CrewAgentExecutor class.')\n", (3938, 4011), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4072, 4145), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the ToolsHandler class."""'}), "(default=None, description='An instance of the ToolsHandler class.')\n", (4077, 4145), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4339, 4442), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed after each step of the agent execution."""'}), "(default=None, description=\n    'Callback to be executed after each step of the agent execution.')\n", (4344, 4442), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4797, 4871), 'pydantic.Field', 'Field', ([], {'description': '"""Language model that will run the agent."""', 'default': 'None'}), "(description='Language model that will run the agent.', default=None)\n", (4802, 4871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4951, 5009), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed"""'}), "(default=None, description='Callback to be executed')\n", (4956, 5009), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5159, 5195), 'pydantic.field_validator', 'field_validator', (['"""id"""'], {'mode': '"""before"""'}), "('id', mode='before')\n", (5174, 5195), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5430, 5459), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5445, 5459), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5723, 5752), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5738, 5752), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((6070, 6099), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (6085, 6099), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5847, 5867), 'crewai.utilities.Logger', 'Logger', (['self.verbose'], {}), '(self.verbose)\n', (5853, 5867), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((7345, 7375), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (7368, 7375), False, 'from langchain.tools.render import render_text_description\n'), ((8088, 8126), 'crewai.agents.ToolsHandler', 'ToolsHandler', ([], {'cache': 'self.cache_handler'}), '(cache=self.cache_handler)\n', (8100, 8126), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4229, 4243), 'crewai.agents.CacheHandler', 'CacheHandler', ([], {}), '()\n', (4241, 4243), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4492, 4498), 'crewai.utilities.I18N', 'I18N', ([], {}), '()\n', (4496, 4498), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((5305, 5397), 'pydantic_core.PydanticCustomError', 'PydanticCustomError', (['"""may_not_set_field"""', '"""This field is not to be set by the user."""', '{}'], {}), "('may_not_set_field',\n    'This field is not to be set by the user.', {})\n", (5324, 5397), False, 'from pydantic_core import PydanticCustomError\n'), ((5957, 6013), 'crewai.utilities.RPMController', 'RPMController', ([], {'max_rpm': 'self.max_rpm', 'logger': 'self._logger'}), '(max_rpm=self.max_rpm, logger=self._logger)\n', (5970, 6013), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((9715, 9805), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'self.llm', 'input_key': '"""input"""', 'memory_key': '"""chat_history"""'}), "(llm=self.llm, input_key='input', memory_key=\n    'chat_history')\n", (9740, 9805), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((10407, 10434), 'crewai.agents.CrewAgentParser', 'CrewAgentParser', ([], {'agent': 'self'}), '(agent=self)\n', (10422, 10434), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((6281, 6339), 'crewai.utilities.token_counter_callback.TokenCalcHandler', 'TokenCalcHandler', (['self.llm.model_name', 'self._token_process'], {}), '(self.llm.model_name, self._token_process)\n', (6297, 6339), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((10502, 10537), 'langchain.agents.agent.RunnableAgent', 'RunnableAgent', ([], {'runnable': 'inner_agent'}), '(runnable=inner_agent)\n', (10515, 10537), False, 'from langchain.agents.agent import RunnableAgent\n'), ((9974, 10010), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (9981, 10010), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((10075, 10111), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (10082, 10111), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((4630, 4674), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MODEL_NAME"""', '"""gpt-4"""'], {}), "('OPENAI_MODEL_NAME', 'gpt-4')\n", (4644, 4674), False, 'import os\n')] | 
| 
	import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answer
Thought: you should always think about what to do
Action: Exactly only one word out of: {tool_names}
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
FORMAT_INSTRUCTIONS = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you.  For example, the user might ask: What is the latest news?
Here is an example sequence you can follow:
Thought: I should search online for the latest news.
Action: Search
Action Input: What is the latest news?
Observation: X is going away.  Z is again happening.
Thought: That is interesting, I should search for more information about X and Z and also search about Q.
Action: Search
Action Input: How is X impacting things.  Why is Z happening again, and what are the consequences?
Observation: X is causing Y.  Z may be caused by P and will lead to H.
Thought: I now know the final answer
Final Answer: The latest news is:
* X is going away, and this is caused by Y.
* Z is happening again, and the cause is P and will lead to H.
Overall, X and Z are important problems.
"""
FORMAT_INSTRUCTIONS_PYTHON = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you.  For example, the user might ask: How many rows are in the dataset?
Here is an example sequence you can follow.  You can repeat Thoughts, but as soon as possible you should try to answer the original user question.  Once you can answer the user question, just say: Thought: I now know the final answer
Thought: I should use python_repl_ast tool.
Action: python_repl_ast
Action Input: df.shape
Observation: (25, 10)
Thought: I now know the final answer
Final Answer: There are 25 rows in the dataset.
"""
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)
class H2OMRKLOutputParser(MRKLOutputParser):
    """MRKL Output parser for the chat agent."""
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS
    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        action_match = re.search(regex, text, re.DOTALL)
        if includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )
        elif action_match:
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
            # If it's a well-formed SQL query, make sure we don't remove any trailing " chars
            if tool_input.startswith("SELECT ") is False:
                tool_input = tool_input.strip('"')
            return AgentAction(action, tool_input, text)
        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        else:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")
    @property
    def _type(self) -> str:
        return "mrkl"
class H2OPythonMRKLOutputParser(H2OMRKLOutputParser):
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS_PYTHON
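# Hedged illustration (not part of the original module): given a completion that
# follows FORMAT_INSTRUCTIONS, parse() returns an AgentAction, while a completion
# containing "Final Answer:" returns an AgentFinish. The sample text is invented.
#
#   parser = H2OMRKLOutputParser()
#   sample = "Thought: I should search online.\nAction: Search\nAction Input: latest news"
#   step = parser.parse(sample)   # AgentAction(tool='Search', tool_input='latest news', log=sample)
#   done = parser.parse("Final Answer: nothing new today")   # AgentFinish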
 | 
	[
  "langchain.schema.AgentAction",
  "langchain.schema.OutputParserException"
] | 
	[((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DOTALL)\n", (3698, 3749), False, 'import re\n'), ((3766, 3928), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n    MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text,\n    send_to_llm=True)\n", (3787, 3928), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((3635, 3672), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'tool_input', 'text'], {}), '(action, tool_input, text)\n', (3646, 3672), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4016, 4103), 're.search', 're.search', (['"""[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)"""', 'text', 're.DOTALL'], {}), "('[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)', text, re.\n    DOTALL)\n", (4025, 4103), False, 'import re\n'), ((4133, 4300), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n    MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text,\n    send_to_llm=True)\n", (4154, 4300), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4403, 4465), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (4424, 4465), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] | 
| 
	import os
import re
import uuid
import cv2
import torch
import requests
import io, base64
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
TOOLS:
------
Visual ChatGPT  has access to the following tools:"""
VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
VISUAL_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist.
You will remember to provide the image file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagination.
The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human. 
Thought: Do I need to use a tool? {agent_scratchpad}"""
ENDPOINT = "http://localhost:7860"
T2IAPI = ENDPOINT + "/controlnet/txt2img"
DETECTAPI = ENDPOINT + "/controlnet/detect"
MODELLIST = ENDPOINT + "/controlnet/model_list"
device = "cpu"
if torch.cuda.is_available():
    device = "cuda"
def readImage(path):
    img = cv2.imread(path)
    retval, buffer = cv2.imencode('.jpg', img)
    b64img = base64.b64encode(buffer).decode("utf-8")
    return b64img
def get_model(pattern='^control_canny.*'):
    r = requests.get(MODELLIST)
    result = r.json()["model_list"]
    for item in result:
        if re.match(pattern, item):
            return item
def do_webui_request(url=T2IAPI, **kwargs):
    reqbody = {
        "prompt": "best quality, extremely detailed",
        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
        "seed": -1,
        "subseed": -1,
        "subseed_strength": 0,
        "batch_size": 1,
        "n_iter": 1,
        "steps": 15,
        "cfg_scale": 7,
        "width": 512,
        "height": 768,
        "restore_faces": True,
        "eta": 0,
        "sampler_index": "Euler a",
        "controlnet_input_images": [],
        "controlnet_module": 'canny',
        "controlnet_model": 'control_canny-fp16 [e3fe7712]',
        "controlnet_guidance": 1.0,
    }
    reqbody.update(kwargs)
    r = requests.post(url, json=reqbody)
    return r.json()
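# Hedged usage sketch (not part of the original file): any keyword argument
# overrides the defaults above, so a plain txt2img call only needs a prompt.
# This assumes a running AUTOMATIC1111 webui with the ControlNet API enabled
# at ENDPOINT; the prompt below is illustrative.
#
#   resp = do_webui_request(url=ENDPOINT + "/sdapi/v1/txt2img", prompt="a red bicycle")
#   png_bytes = base64.b64decode(resp["images"][0])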
    
def cut_dialogue_history(history_memory, keep_last_n_words=500):
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"hitory_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    else:
        paragraphs = history_memory.split('\n')
        last_n_tokens = n_tokens
        while last_n_tokens >= keep_last_n_words:
            last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
            paragraphs = paragraphs[1:]
        return '\n' + '\n'.join(paragraphs)
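# Hedged worked example (not part of the original file): with keep_last_n_words=5,
# the function keeps dropping the oldest paragraph until the remaining word count
# falls below the limit.
#
#   history = "line one here\nline two here\nline three here"   # 9 words
#   cut_dialogue_history(history, keep_last_n_words=5)
#   # -> "\nline three here"   (the first two paragraphs are dropped)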
def get_new_image_name(org_img_name, func_name="update"):
    head_tail = os.path.split(org_img_name)
    head = head_tail[0]
    tail = head_tail[1]
    name_split = tail.split('.')[0].split('_')
    this_new_uuid = str(uuid.uuid4())[0:4]
    if len(name_split) == 1:
        most_org_file_name = name_split[0]
        recent_prev_file_name = name_split[0]
        new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
    else:
        assert len(name_split) == 4
        most_org_file_name = name_split[3]
        recent_prev_file_name = name_split[0]
        new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
    return os.path.join(head, new_file_name)
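# Hedged worked example (not part of the original file): the generated name always
# has four underscore-separated parts, so repeated edits keep both the original and
# the most recent ancestor in the filename. The uuid prefixes differ on every call;
# "abcd" and "ef01" below are placeholders.
#
#   get_new_image_name("image/cat.png", func_name="edge")
#   # -> "image/abcd_edge_cat_cat.png"
#   get_new_image_name("image/abcd_edge_cat_cat.png", func_name="canny2image")
#   # -> "image/ef01_canny2image_abcd_cat.png"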
class MaskFormer:
    def __init__(self, device):
        self.device = device
        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
    def inference(self, image_path, text):
        threshold = 0.5
        min_area = 0.02
        padding = 20
        original_image = Image.open(image_path)
        image = original_image.resize((512, 512))
        inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt",).to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
        area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
        if area_ratio < min_area:
            return None
        true_indices = np.argwhere(mask)
        mask_array = np.zeros_like(mask, dtype=bool)
        for idx in true_indices:
            padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
            mask_array[padded_slice] = True
        visual_mask = (mask_array * 255).astype(np.uint8)
        image_mask = Image.fromarray(visual_mask)
        return image_mask.resize(image.size)
    
# class ImageEditing:
#     def __init__(self, device):
#         print("Initializing StableDiffusionInpaint to %s" % device)
#         self.device = device
#         self.mask_former = MaskFormer(device=self.device)
#         # self.inpainting = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",).to(device)
#     def remove_part_of_image(self, input):
#         image_path, to_be_removed_txt = input.split(",")
#         print(f'remove_part_of_image: to_be_removed {to_be_removed_txt}')
#         return self.replace_part_of_image(f"{image_path},{to_be_removed_txt},background")
#     def replace_part_of_image(self, input):
#         image_path, to_be_replaced_txt, replace_with_txt = input.split(",")
#         print(f'replace_part_of_image: replace_with_txt {replace_with_txt}')
#         mask_image = self.mask_former.inference(image_path, to_be_replaced_txt)
#         buffered = io.BytesIO()
#         mask_image.save(buffered, format="JPEG")
#         resp = do_webui_request(
#             url=ENDPOINT + "/sdapi/v1/img2img",
#             init_images=[readImage(image_path)],
#             mask=base64.b64encode(buffered.getvalue()).decode("utf-8"),
#             prompt=replace_with_txt,
#         )
#         updated_image_path = get_new_image_name(image_path, func_name="replace-something")
#         with open(updated_image_path, 'wb') as f:
#             f.write(base64.b64decode(resp['images'][0]))
#         return updated_image_path
# class Pix2Pix:
#     def __init__(self, device):
#         print("Initializing Pix2Pix to %s" % device)
#         self.device = device
#         self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None).to(device)
#         self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
#     def inference(self, inputs):
#         """Change style of image."""
#         print("===>Starting Pix2Pix Inference")
#         image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
#         original_image = Image.open(image_path)
#         image = self.pipe(instruct_text,image=original_image,num_inference_steps=40,image_guidance_scale=1.2,).images[0]
#         updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
#         image.save(updated_image_path)
#         return updated_image_path
class T2I:
    def __init__(self, device):
        print("Initializing T2I to %s" % device)
        self.device = device
        self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
        
    def inference(self, text):
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
        print(f'{text} refined to {refined_text}')
        resp = do_webui_request(
            url=ENDPOINT + "/sdapi/v1/txt2img",
            prompt=refined_text,
        )
        with open(image_filename, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
        return image_filename
class ImageCaptioning:
    def __init__(self, device):
        print("Initializing ImageCaptioning to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        return captions
    
    
class image2canny:
    def inference(self, inputs):
        print("===>Starting image2canny Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="segmentation",
        )
        updated_image_path = get_new_image_name(inputs, func_name="edge")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class canny2image:
    def inference(self, inputs):
        print("===>Starting canny2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_canny.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="canny2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2line:
    def inference(self, inputs):
        print("===>Starting image2hough Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="mlsd",
        )
        updated_image_path = get_new_image_name(inputs, func_name="line-of")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class line2image:
    def inference(self, inputs):
        print("===>Starting line2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_mlsd.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="line2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2hed:
    def inference(self, inputs):
        print("===>Starting image2hed Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="hed",
        )
        updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class hed2image:
    def inference(self, inputs):
        print("===>Starting hed2image Inference")
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_hed.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="hed2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2scribble:
    def inference(self, inputs):
        print("===>Starting image2scribble Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="scribble",
        )
        updated_image_path = get_new_image_name(inputs, func_name="scribble")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class scribble2image:
    def inference(self, inputs):
        print("===>Starting seg2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_scribble.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
    
    
class image2pose:
    def inference(self, inputs):
        print("===>Starting image2pose Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="openpose",
        )
        updated_image_path = get_new_image_name(inputs, func_name="human-pose")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class pose2image:
    def inference(self, inputs):
        print("===>Starting pose2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_openpose.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="pose2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2seg:
    def inference(self, inputs):
        print("===>Starting image2seg Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="segmentation",
        )
        updated_image_path = get_new_image_name(inputs, func_name="segmentation")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class seg2image:
    def inference(self, inputs):
        print("===>Starting seg2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="none",
            controlnet_model=get_model(pattern='^control_seg.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="segment2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2depth:
    def inference(self, inputs):
        print("===>Starting image2depth Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="depth",
        )
        updated_image_path = get_new_image_name(inputs, func_name="depth")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class depth2image:
    def inference(self, inputs):
        print("===>Starting depth2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="depth",
            controlnet_model=get_model(pattern='^control_depth.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="depth2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class image2normal:
    def inference(self, inputs):
        print("===>Starting image2 normal Inference")
        resp = do_webui_request(
            url=DETECTAPI,
            controlnet_input_images=[readImage(inputs)], 
            controlnet_module="normal",
        )
        updated_image_path = get_new_image_name(inputs, func_name="normal-map")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class normal2image:
    def inference(self, inputs):
        print("===>Starting normal2image Inference")
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        resp = do_webui_request(
            prompt=instruct_text,
            controlnet_input_images=[readImage(image_path)], 
            controlnet_module="normal",
            controlnet_model=get_model(pattern='^control_normal.*'),
        )
        updated_image_path = get_new_image_name(image_path, func_name="normal2image")
        with open(updated_image_path, 'wb') as f:
            f.write(base64.b64decode(resp['images'][0]))
        return updated_image_path
class BLIPVQA:
    def __init__(self, device):
        print("Initializing BLIP VQA to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
        self.model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(self.device)
    def get_answer_from_question_and_image(self, inputs):
        image_path, question = inputs.split(",")
        raw_image = Image.open(image_path).convert('RGB')
        print(F'BLIPVQA :question :{question}')
        inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        answer = self.processor.decode(out[0], skip_special_tokens=True)
        return answer
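# Hedged usage sketch (not part of the original file): the tool expects a single
# comma separated string, image path first, then the question. The path below is
# hypothetical and the model weights are downloaded on first use.
#
#   vqa = BLIPVQA(device="cpu")
#   vqa.get_answer_from_question_and_image("image/abcd_edge_cat_cat.png,what color is the cat?")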
class ConversationBot:
    def __init__(self):
        print("Initializing VisualChatGPT")
        # self.edit = ImageEditing(device=device)
        self.i2t = ImageCaptioning(device=device)
        self.t2i = T2I(device=device)
        self.image2canny = image2canny()
        self.canny2image = canny2image()
        self.image2line = image2line()
        self.line2image = line2image()
        self.image2hed = image2hed()
        self.hed2image = hed2image()
        self.image2scribble = image2scribble()
        self.scribble2image = scribble2image()
        self.image2pose = image2pose()
        self.pose2image = pose2image()
        self.BLIPVQA = BLIPVQA(device=device)
        self.image2seg = image2seg()
        self.seg2image = seg2image()
        self.image2depth = image2depth()
        self.depth2image = depth2image()
        self.image2normal = image2normal()
        self.normal2image = normal2image()
        # self.pix2pix = Pix2Pix(device="cuda:3")
        self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
        self.tools = [
            Tool(name="Get Photo Description", func=self.i2t.inference,
                 description="useful when you want to know what is inside the photo. receives image_path as input. "
                             "The input to this tool should be a string, representing the image_path. "),
            Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
                 description="useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
                             "The input to this tool should be a string, representing the text used to generate image. "),
            # Tool(name="Remove Something From The Photo", func=self.edit.remove_part_of_image,
            #      description="useful when you want to remove and object or something from the photo from its description or location. "
            #                  "The input to this tool should be a comma seperated string of two, representing the image_path and the object need to be removed. "),
            # Tool(name="Replace Something From The Photo", func=self.edit.replace_part_of_image,
            #      description="useful when you want to replace an object from the object description or location with another object from its description. "
            #                  "The input to this tool should be a comma seperated string of three, representing the image_path, the object to be replaced, the object to be replaced with "),
            # Tool(name="Instruct Image Using Text", func=self.pix2pix.inference,
            #      description="useful when you want to the style of the image to be like the text. like: make it look like a painting. or make it like a robot. "
            #                  "The input to this tool should be a comma seperated string of two, representing the image_path and the text. "),
            Tool(name="Answer Question About The Image", func=self.BLIPVQA.get_answer_from_question_and_image,
                 description="useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the question"),
            Tool(name="Edge Detection On Image", func=self.image2canny.inference,
                 description="useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Canny Image", func=self.canny2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "),
            Tool(name="Line Detection On Image", func=self.image2line.inference,
                 description="useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Line Image", func=self.line2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "),
            Tool(name="Hed Detection On Image", func=self.image2hed.inference,
                 description="useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Soft Hed Boundary Image", func=self.hed2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Segmentation On Image", func=self.image2seg.inference,
                 description="useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Segmentations", func=self.seg2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Predict Depth On Image", func=self.image2depth.inference,
                 description="useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Depth",  func=self.depth2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Predict Normal Map On Image", func=self.image2normal.inference,
                 description="useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Normal Map", func=self.normal2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Sketch Detection On Image", func=self.image2scribble.inference,
                 description="useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Sketch Image", func=self.scribble2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"),
            Tool(name="Pose Detection On Image", func=self.image2pose.inference,
                 description="useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. "
                             "The input to this tool should be a string, representing the image_path"),
            Tool(name="Generate Image Condition On Pose Image", func=self.pose2image.inference,
                 description="useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. "
                             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description")]
        
    def init_langchain(self, openai_api_key):
        self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
        self.agent = initialize_agent(
            self.tools,
            self.llm,
            agent="conversational-react-description",
            verbose=True,
            memory=self.memory,
            return_intermediate_steps=True,
            agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}
        )
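    # run_text: lazily builds the LangChain agent on first use, trims the conversation
    # buffer to the last 500 words, runs the agent on the user's text, and rewrites any
    # image paths in the answer into inline markdown image links for the Gradio chatbot.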
    def run_text(self, openai_api_key, text, state):
        if not hasattr(self, "agent"):
            self.init_langchain(openai_api_key)
        print("===============Running run_text =============")
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        print("======>Current memory:\n %s" % self.agent.memory)
        response = re.sub('(image/\S*png)', lambda m: f'})*{m.group(0)}*', res['output'])
        state = state + [(text, response)]
        print("Outputs:", state)
        return state, state
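    # run_image: saves the upload under image/ with a random 8-character name, rescales it
    # so its longer side becomes 512 px, captions it with the image-to-text model (self.i2t),
    # and seeds the agent's memory with a synthetic Human/AI turn so later tool calls can
    # refer to the saved image_path instead of the raw upload.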
    def run_image(self, openai_api_key, image, state, txt):
        if not hasattr(self, "agent"):
            self.init_langchain(openai_api_key)
        print("===============Running run_image =============")
        print("Inputs:", image, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        print("======>Auto Resize Image...")
        img = Image.open(image.name)
        width, height = img.size
        ratio = min(512 / width, 512 / height)
        width_new, height_new = (round(width * ratio), round(height * ratio))
        img = img.resize((width_new, height_new))
        img = img.convert('RGB')
        img.save(image_filename, "PNG")
        print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
        description = self.i2t.inference(image_filename)
        Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
                       "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
        AI_prompt = "Received.  "
        self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
        print("======>Current memory:\n %s" % self.agent.memory)
        state = state + [(f"*{image_filename}*", AI_prompt)]
        print("Outputs:", state)
        return state, state, txt + ' ' + image_filename + ' '
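# Minimal Gradio front end: a password box for the OpenAI API key, a chatbot pane with
# shared state, a text box wired to run_text, an upload button wired to run_image, and a
# Clear button that resets the conversation memory, the chat window, and the state.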
if __name__ == '__main__':
    os.makedirs("image/", exist_ok=True)
    bot = ConversationBot()
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        openai_api_key = gr.Textbox(type="password", label="Enter your OpenAI API key here")       
        chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
            with gr.Column(scale=0.15, min_width=0):
                clear = gr.Button("Clear️")
            with gr.Column(scale=0.15, min_width=0):
                btn = gr.UploadButton("Upload", file_types=["image"])
                
        txt.submit(bot.run_text, [openai_api_key, txt, state], [chatbot, state])
        txt.submit(lambda: "", None, txt)
        btn.upload(bot.run_image, [openai_api_key, btn, state, txt], [chatbot, state, txt])
        clear.click(bot.memory.clear)
        clear.click(lambda: [], None, chatbot)
        clear.click(lambda: [], None, state)
    
    
    demo.launch(server_name="0.0.0.0", server_port=7864) | 
	[
  "langchain.llms.openai.OpenAI",
  "langchain.agents.tools.Tool",
  "langchain.chains.conversation.memory.ConversationBufferMemory",
  "langchain.agents.initialize.initialize_agent"
] | 
	[((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg', img)\n", (3941, 3954), False, 'import cv2\n'), ((4079, 4102), 'requests.get', 'requests.get', (['MODELLIST'], {}), '(MODELLIST)\n', (4091, 4102), False, 'import requests\n'), ((5000, 5032), 'requests.post', 'requests.post', (['url'], {'json': 'reqbody'}), '(url, json=reqbody)\n', (5013, 5032), False, 'import requests\n'), ((5695, 5722), 'os.path.split', 'os.path.split', (['org_img_name'], {}), '(org_img_name)\n', (5708, 5722), False, 'import os\n'), ((6361, 6394), 'os.path.join', 'os.path.join', (['head', 'new_file_name'], {}), '(head, new_file_name)\n', (6373, 6394), False, 'import os\n'), ((35612, 35648), 'os.makedirs', 'os.makedirs', (['"""image/"""'], {'exist_ok': '(True)'}), "('image/', exist_ok=True)\n", (35623, 35648), False, 'import os\n'), ((4174, 4197), 're.match', 're.match', (['pattern', 'item'], {}), '(pattern, item)\n', (4182, 4197), False, 'import re\n'), ((6500, 6562), 'transformers.CLIPSegProcessor.from_pretrained', 'CLIPSegProcessor.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6532, 6562), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((6807, 6829), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6817, 6829), False, 'from PIL import Image\n'), ((7306, 7323), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7317, 7323), True, 'import numpy as np\n'), ((7345, 7376), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (7358, 7376), True, 'import numpy as np\n'), ((7624, 7652), 'PIL.Image.fromarray', 'Image.fromarray', (['visual_mask'], {}), '(visual_mask)\n', (7639, 7652), False, 'from PIL import Image\n'), ((10312, 10384), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10341, 10384), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10418, 10497), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10454, 10497), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10535, 10655), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n    text_refine_tokenizer, device=self.device)\n", (10543, 10655), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((11412, 11482), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (11441, 11482), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21185, 
21242), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21214, 21242), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((22789, 22861), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (22813, 22861), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((32769, 32821), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), '(temperature=0, openai_api_key=openai_api_key)\n', (32775, 32821), False, 'from langchain.llms.openai import OpenAI\n'), ((32843, 33146), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions':\n    VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    VISUAL_CHATGPT_PREFIX, 'format_instructions':\n    VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX})\n", (32859, 33146), False, 'from langchain.agents.initialize import initialize_agent\n'), ((34415, 34437), 'PIL.Image.open', 'Image.open', (['image.name'], {}), '(image.name)\n', (34425, 34437), False, 'from PIL import Image\n'), ((35686, 35742), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (35695, 35742), True, 'import gradio as gr\n'), ((35777, 35844), 'gradio.Textbox', 'gr.Textbox', ([], {'type': '"""password"""', 'label': '"""Enter your OpenAI API key here"""'}), "(type='password', label='Enter your OpenAI API key here')\n", (35787, 35844), True, 'import gradio as gr\n'), ((35870, 35923), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""Visual ChatGPT"""'}), "(elem_id='chatbot', label='Visual ChatGPT')\n", (35880, 35923), True, 'import gradio as gr\n'), ((35940, 35952), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (35948, 35952), True, 'import gradio as gr\n'), ((3968, 3992), 'base64.b64encode', 'base64.b64encode', (['buffer'], {}), '(buffer)\n', (3984, 3992), False, 'import io, base64\n'), ((5842, 5854), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5852, 5854), False, 'import uuid\n'), ((7010, 7025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7023, 7025), False, 'import torch\n'), ((22897, 23139), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n    'useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. 
'\n    )\n", (22901, 23139), False, 'from langchain.agents.tools import Tool\n'), ((23192, 23572), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n    description=\n    'useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n    )\n", (23196, 23572), False, 'from langchain.agents.tools import Tool\n'), ((24857, 25269), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Answer Question About The Image"""', 'func': 'self.BLIPVQA.get_answer_from_question_and_image', 'description': '"""useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question"""'}), "(name='Answer Question About The Image', func=self.BLIPVQA.\n    get_answer_from_question_and_image, description=\n    'useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question'\n    )\n", (24861, 25269), False, 'from langchain.agents.tools import Tool\n'), ((25317, 25688), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Edge Detection On Image"""', 'func': 'self.image2canny.inference', 'description': '"""useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Edge Detection On Image', func=self.image2canny.inference,\n    description=\n    'useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (25321, 25688), False, 'from langchain.agents.tools import Tool\n'), ((25737, 26224), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Canny Image"""', 'func': 'self.canny2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. 
"""'}), "(name='Generate Image Condition On Canny Image', func=self.canny2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n    )\n", (25741, 26224), False, 'from langchain.agents.tools import Tool\n'), ((26272, 26685), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Line Detection On Image"""', 'func': 'self.image2line.inference', 'description': '"""useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Line Detection On Image', func=self.image2line.inference,\n    description=\n    'useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (26276, 26685), False, 'from langchain.agents.tools import Tool\n'), ((26734, 27239), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Line Image"""', 'func': 'self.line2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. """'}), "(name='Generate Image Condition On Line Image', func=self.line2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n    )\n", (26738, 27239), False, 'from langchain.agents.tools import Tool\n'), ((27287, 27703), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Hed Detection On Image"""', 'func': 'self.image2hed.inference', 'description': '"""useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Hed Detection On Image', func=self.image2hed.inference,\n    description=\n    'useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. 
The input to this tool should be a string, representing the image_path'\n    )\n", (27291, 27703), False, 'from langchain.agents.tools import Tool\n'), ((27752, 28273), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Soft Hed Boundary Image"""', 'func': 'self.hed2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Soft Hed Boundary Image', func=self.\n    hed2image.inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (27756, 28273), False, 'from langchain.agents.tools import Tool\n'), ((28321, 28650), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Segmentation On Image"""', 'func': 'self.image2seg.inference', 'description': '"""useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Segmentation On Image', func=self.image2seg.inference,\n    description=\n    'useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path'\n    )\n", (28325, 28650), False, 'from langchain.agents.tools import Tool\n'), ((28699, 29195), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Segmentations"""', 'func': 'self.seg2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Segmentations', func=self.seg2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (28703, 29195), False, 'from langchain.agents.tools import Tool\n'), ((29243, 29580), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Depth On Image"""', 'func': 'self.image2depth.inference', 'description': '"""useful when you want to detect depth of the image. 
like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Depth On Image', func=self.image2depth.inference,\n    description=\n    'useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path'\n    )\n", (29247, 29580), False, 'from langchain.agents.tools import Tool\n'), ((29629, 30104), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Depth"""', 'func': 'self.depth2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Depth', func=self.depth2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (29633, 30104), False, 'from langchain.agents.tools import Tool\n'), ((30153, 30461), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Normal Map On Image"""', 'func': 'self.image2normal.inference', 'description': '"""useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Normal Map On Image', func=self.image2normal.inference,\n    description=\n    'useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path'\n    )\n", (30157, 30461), False, 'from langchain.agents.tools import Tool\n'), ((30510, 30990), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Normal Map"""', 'func': 'self.normal2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Normal Map', func=self.normal2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (30514, 30990), False, 'from langchain.agents.tools import Tool\n'), ((31038, 31384), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sketch Detection On Image"""', 'func': 'self.image2scribble.inference', 'description': '"""useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Sketch Detection On Image', func=self.image2scribble.inference,\n    description=\n    'useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path'\n    )\n", (31042, 31384), False, 'from langchain.agents.tools import Tool\n'), ((31433, 31791), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Sketch Image"""', 'func': 'self.scribble2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Sketch Image', func=self.\n    scribble2image.inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (31437, 31791), False, 'from langchain.agents.tools import Tool\n'), ((31839, 32151), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Pose Detection On Image"""', 'func': 'self.image2pose.inference', 'description': '"""useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Pose Detection On Image', func=self.image2pose.inference,\n    description=\n    'useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path'\n    )\n", (31843, 32151), False, 'from langchain.agents.tools import Tool\n'), ((32200, 32659), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Pose Image"""', 'func': 'self.pose2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Pose Image', func=self.pose2image.\n    inference, description=\n    'useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n    )\n", (32204, 32659), False, 'from langchain.agents.tools import Tool\n'), ((35966, 35974), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (35972, 35974), True, 'import gradio as gr\n'), ((6584, 6657), 'transformers.CLIPSegForImageSegmentation.from_pretrained', 'CLIPSegForImageSegmentation.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6627, 6657), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((7172, 7189), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7183, 7189), True, 'import numpy as np\n'), ((11089, 11124), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (11105, 11124), False, 'import io, base64\n'), ((11504, 11594), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n    'Salesforce/blip-image-captioning-base')\n", (11548, 11594), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((12911, 12946), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (12927, 12946), False, 'import io, base64\n'), ((13400, 13435), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (13416, 13435), False, 'import io, base64\n'), ((14063, 14098), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14079, 14098), False, 'import io, base64\n'), ((14553, 14588), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14569, 14588), False, 'import io, base64\n'), ((15122, 15157), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15138, 15157), False, 'import io, base64\n'), ((15623, 15658), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15639, 15658), False, 'import io, base64\n'), ((16297, 16332), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16313, 16332), False, 'import io, base64\n'), ((16800, 16835), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16816, 16835), False, 'import io, base64\n'), ((17467, 17502), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17483, 17502), False, 'import io, base64\n'), ((17966, 18001), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17982, 18001), False, 'import io, base64\n'), ((18629, 18664), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (18645, 18664), False, 'import io, base64\n'), ((19118, 19153), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19134, 19153), False, 'import io, base64\n'), ((19786, 19821), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19802, 19821), False, 'import io, base64\n'), ((20284, 20319), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (20300, 20319), False, 'import io, base64\n'), ((20957, 20992), 'base64.b64decode', 
'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (20973, 20992), False, 'import io, base64\n'), ((21264, 21332), 'transformers.BlipForQuestionAnswering.from_pretrained', 'BlipForQuestionAnswering.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21304, 21332), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21477, 21499), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (21487, 21499), False, 'from PIL import Image\n'), ((35993, 36013), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (36002, 36013), True, 'import gradio as gr\n'), ((36168, 36202), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36177, 36202), True, 'import gradio as gr\n'), ((36228, 36247), 'gradio.Button', 'gr.Button', (['"""Clear️"""'], {}), "('Clear️')\n", (36237, 36247), True, 'import gradio as gr\n'), ((36265, 36299), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36274, 36299), True, 'import gradio as gr\n'), ((36323, 36370), 'gradio.UploadButton', 'gr.UploadButton', (['"""Upload"""'], {'file_types': "['image']"}), "('Upload', file_types=['image'])\n", (36338, 36370), True, 'import gradio as gr\n'), ((11676, 11698), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (11686, 11698), False, 'from PIL import Image\n'), ((10742, 10754), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10752, 10754), False, 'import uuid\n'), ((34327, 34339), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34337, 34339), False, 'import uuid\n'), ((36037, 36132), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n    'Enter text and press enter, or upload an image')\n", (36047, 36132), True, 'import gradio as gr\n'), ((7085, 7110), 'torch.sigmoid', 'torch.sigmoid', (['outputs[0]'], {}), '(outputs[0])\n', (7098, 7110), False, 'import torch\n')] | 
| 
	from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
    AuthenticationError,
    InvalidRequestError,
    RateLimitError,
    ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
    try:
        return parser.parse(completion)
    except OutputParserException as e:
        raise OpenAIError(
            e, "There was an issue parsing the response from the AI model."
        )
async def openai_error_handler(
    func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
    try:
        return await func(*args, **kwargs)
    except ServiceUnavailableError as e:
        raise OpenAIError(
            e,
            "OpenAI is experiencing issues. Visit "
            "https://status.openai.com/ for more info.",
            should_log=not settings.custom_api_key,
        )
    except InvalidRequestError as e:
        if e.user_message.startswith("The model:"):
            raise OpenAIError(
                e,
                f"Your API key does not have access to your current model. Please use a different model.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except AuthenticationError as e:
        raise OpenAIError(
            e,
            "Authentication error: Ensure a valid API key is being used.",
            should_log=not settings.custom_api_key,
        )
    except RateLimitError as e:
        if e.user_message.startswith("You exceeded your current quota"):
            raise OpenAIError(
                e,
                f"Your API key exceeded your current quota, please check your plan and billing details.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except Exception as e:
        raise OpenAIError(
            e, "There was an unexpected issue getting a response from the AI model."
        )
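# Convenience wrapper: builds an LLMChain from the given model and prompt and awaits it
# through openai_error_handler so any OpenAI failure surfaces as a normalized OpenAIError.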
async def call_model_with_handling(
    model: BaseChatModel,
    prompt: BasePromptTemplate,
    args: Dict[str, str],
    settings: ModelSettings,
    **kwargs: Any,
) -> str:
    chain = LLMChain(llm=model, prompt=prompt)
    return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
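# Hedged usage sketch (not part of the original module): the names PromptTemplate,
# ChatOpenAI, and my_settings below are illustrative assumptions, not definitions
# from this file.
#
#   from langchain import PromptTemplate
#   from langchain.chat_models import ChatOpenAI
#
#   async def summarize(text: str, my_settings: ModelSettings) -> str:
#       prompt = PromptTemplate(input_variables=["text"], template="Summarize: {text}")
#       return await call_model_with_handling(
#           ChatOpenAI(temperature=0), prompt, {"text": text}, settings=my_settings
#       )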
 | 
	[
  "langchain.LLMChain"
] | 
	[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n    , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'Authentication error: Ensure a valid API key is being used.',\n    should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n    'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key does not have access to your current model. Please use a different model.'\n    , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key exceeded your current quota, please check your plan and billing details.'\n    , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] | 
| 
	from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
    AuthenticationError,
    InvalidRequestError,
    RateLimitError,
    ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
    try:
        return parser.parse(completion)
    except OutputParserException as e:
        raise OpenAIError(
            e, "There was an issue parsing the response from the AI model."
        )
async def openai_error_handler(
    func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
    try:
        return await func(*args, **kwargs)
    except ServiceUnavailableError as e:
        raise OpenAIError(
            e,
            "OpenAI is experiencing issues. Visit "
            "https://status.openai.com/ for more info.",
            should_log=not settings.custom_api_key,
        )
    except InvalidRequestError as e:
        if e.user_message.startswith("The model:"):
            raise OpenAIError(
                e,
                f"Your API key does not have access to your current model. Please use a different model.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except AuthenticationError as e:
        raise OpenAIError(
            e,
            "Authentication error: Ensure a valid API key is being used.",
            should_log=not settings.custom_api_key,
        )
    except RateLimitError as e:
        if e.user_message.startswith("You exceeded your current quota"):
            raise OpenAIError(
                e,
                f"Your API key exceeded your current quota, please check your plan and billing details.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except Exception as e:
        raise OpenAIError(
            e, "There was an unexpected issue getting a response from the AI model."
        )
async def call_model_with_handling(
    model: BaseChatModel,
    prompt: BasePromptTemplate,
    args: Dict[str, str],
    settings: ModelSettings,
    **kwargs: Any,
) -> str:
    chain = LLMChain(llm=model, prompt=prompt)
    return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
 | 
	[
  "langchain.LLMChain"
] | 
	[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n    , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'Authentication error: Ensure a valid API key is being used.',\n    should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n    'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key does not have access to your current model. Please use a different model.'\n    , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key exceeded your current quota, please check your plan and billing details.'\n    , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] | 
| 
	from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
    AuthenticationError,
    InvalidRequestError,
    RateLimitError,
    ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
    try:
        return parser.parse(completion)
    except OutputParserException as e:
        raise OpenAIError(
            e, "There was an issue parsing the response from the AI model."
        )
async def openai_error_handler(
    func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
    try:
        return await func(*args, **kwargs)
    except ServiceUnavailableError as e:
        raise OpenAIError(
            e,
            "OpenAI is experiencing issues. Visit "
            "https://status.openai.com/ for more info.",
            should_log=not settings.custom_api_key,
        )
    except InvalidRequestError as e:
        if e.user_message.startswith("The model:"):
            raise OpenAIError(
                e,
                f"Your API key does not have access to your current model. Please use a different model.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except AuthenticationError as e:
        raise OpenAIError(
            e,
            "Authentication error: Ensure a valid API key is being used.",
            should_log=not settings.custom_api_key,
        )
    except RateLimitError as e:
        if e.user_message.startswith("You exceeded your current quota"):
            raise OpenAIError(
                e,
                f"Your API key exceeded your current quota, please check your plan and billing details.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except Exception as e:
        raise OpenAIError(
            e, "There was an unexpected issue getting a response from the AI model."
        )
async def call_model_with_handling(
    model: BaseChatModel,
    prompt: BasePromptTemplate,
    args: Dict[str, str],
    settings: ModelSettings,
    **kwargs: Any,
) -> str:
    chain = LLMChain(llm=model, prompt=prompt)
    return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
 | 
	[
  "langchain.LLMChain"
] | 
	[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n    , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'Authentication error: Ensure a valid API key is being used.',\n    should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n    'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key does not have access to your current model. Please use a different model.'\n    , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key exceeded your current quota, please check your plan and billing details.'\n    , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] | 
| 
	from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
    AuthenticationError,
    InvalidRequestError,
    RateLimitError,
    ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
    try:
        return parser.parse(completion)
    except OutputParserException as e:
        raise OpenAIError(
            e, "There was an issue parsing the response from the AI model."
        )
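# Wrap an awaited model call and translate OpenAI SDK exceptions into user-facing OpenAIError messages.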
async def openai_error_handler(
    func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
    try:
        return await func(*args, **kwargs)
    except ServiceUnavailableError as e:
        raise OpenAIError(
            e,
            "OpenAI is experiencing issues. Visit "
            "https://status.openai.com/ for more info.",
            should_log=not settings.custom_api_key,
        )
    except InvalidRequestError as e:
        if e.user_message.startswith("The model:"):
            raise OpenAIError(
                e,
                f"Your API key does not have access to your current model. Please use a different model.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except AuthenticationError as e:
        raise OpenAIError(
            e,
            "Authentication error: Ensure a valid API key is being used.",
            should_log=not settings.custom_api_key,
        )
    except RateLimitError as e:
        if e.user_message.startswith("You exceeded your current quota"):
            raise OpenAIError(
                e,
                f"Your API key exceeded your current quota, please check your plan and billing details.",
                should_log=not settings.custom_api_key,
            )
        raise OpenAIError(e, e.user_message)
    except Exception as e:
        raise OpenAIError(
            e, "There was an unexpected issue getting a response from the AI model."
        )
async def call_model_with_handling(
    model: BaseChatModel,
    prompt: BasePromptTemplate,
    args: Dict[str, str],
    settings: ModelSettings,
    **kwargs: Any,
) -> str:
    chain = LLMChain(llm=model, prompt=prompt)
    return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
 | 
	[
  "langchain.LLMChain"
] | 
	[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n    , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    'Authentication error: Ensure a valid API key is being used.',\n    should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n    'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key does not have access to your current model. Please use a different model.'\n    , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n    f'Your API key exceeded your current quota, please check your plan and billing details.'\n    , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] | 
| 
	import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import  make_archive, copy_templates
from utils.tex_processing import create_copies
from utils.gpt_interaction import GPTModel
from utils.prompts import SYSTEM
from utils.embeddings import EMBEDDINGS
from utils.gpt_interaction import get_gpt_responses
TOTAL_TOKENS = 0
TOTAL_PROMPTS_TOKENS = 0
TOTAL_COMPLETION_TOKENS = 0
def log_usage(usage, generating_target, print_out=True):
    global TOTAL_TOKENS
    global TOTAL_PROMPTS_TOKENS
    global TOTAL_COMPLETION_TOKENS
    prompts_tokens = usage['prompt_tokens']
    completion_tokens = usage['completion_tokens']
    total_tokens = usage['total_tokens']
    TOTAL_TOKENS += total_tokens
    TOTAL_PROMPTS_TOKENS += prompts_tokens
    TOTAL_COMPLETION_TOKENS += completion_tokens
    message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \
              f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
              f"{TOTAL_TOKENS} tokens have been used in total."
    if print_out:
        print(message)
    logging.info(message)
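# Prepare a draft: copy the LaTeX template, generate keywords, collect references,
# optionally pull domain knowledge from a local FAISS database, and propose the paper components.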
def _generation_setup(title,  template="Default",
                      tldr=False, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,  # generating references
                      knowledge_database=None, max_tokens_kd=2048, query_counts=10):
    llm = GPTModel(model="gpt-3.5-turbo-16k")
    bibtex_path, destination_folder = copy_templates(template, title)
    logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))
    #generate key words
    keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
    log_usage(usage, "keywords")
    keywords = {keyword: max_kw_refs for keyword in keywords}
    print("Keywords: \n", keywords)
    #generate references
    ref = References(title, bib_refs)
    ref.collect_papers(keywords, tldr=tldr)
    references = ref.to_prompts(max_tokens=max_tokens_ref)
    all_paper_ids = ref.to_bibtex(bibtex_path)
    # produce domain knowledge
    prompts = f"Title: {title}"
    preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts)
    # check if the database exists or not
    db_path = f"utils/knowledge_databases/{knowledge_database}"
    db_config_path = os.path.join(db_path, "db_meta.json")
    db_index_path = os.path.join(db_path, "faiss_index")
    domain_knowledge = ''
    if os.path.isdir(db_path):
        try:
            with open(db_config_path, "r", encoding="utf-8") as f:
                db_config = json.load(f)
            model_name = db_config["embedding_model"]
            embeddings = EMBEDDINGS[model_name]
            db = FAISS.load_local(db_index_path, embeddings)
            knowledge = Knowledge(db=db)
            knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts)
            domain_knowledge = knowledge.to_prompts(max_tokens_kd)
        except Exception as e:
            domain_knowledge=''
    prompts = f"Title: {title}"
    system_prompt = "You are an assistant designed to propose necessary components of a survey paper. Your response should follow the JSON format."
    components, usage = llm(systems=system_prompt, prompts=prompts, return_json=True)
    log_usage(usage, "media")
    print(f"The paper information has been initialized. References are saved to {bibtex_path}.")
    paper = {}
    paper["title"] = title
    paper["references"] = references
    paper["bibtex"] = bibtex_path
    paper["components"] = components
    paper["domain_knowledge"] = domain_knowledge
    return paper, destination_folder, all_paper_ids
def section_generation(paper, section, save_to_path, model, research_field="machine learning"):
    """
    The main pipeline of generating a section.
        1. Generate prompts.
        2. Get responses from AI assistant.
        3. Extract the section text.
        4. Save the text to .tex file.
    :return usage
    """
    title = paper["title"]
    references = paper["references"]
    components = paper['components']
    instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.'
    fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n"
    instruction_subprompt = "\n" \
                            "Your response should follow the following instructions:\n" \
                            "{instruction}\n"
    ref_instruction_subprompt = "- Read references. " \
                                "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \
                                "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \
                                "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \
                                "- Avoid citing the same reference in a same paragraph.\n" \
                                "\n" \
                                "References:\n" \
                                "{references}"
    output_subprompt = "Ensure that it can be directly compiled by LaTeX."
    review_prompts = PromptTemplate(
        input_variables=["title", "components", "instruction", "section", "references"],
        template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt)
    prompts = review_prompts.format(title=title,
                                    components=components,
                                    instruction=instruction,
                                    section=section,
                                    references=references)
    SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
                                               template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX." )
    output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts,
                                      model=model, temperature=0.4)
    output = output[25:]  # drop the first 25 characters of the model response
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    with open(tex_file, "w", encoding="utf-8") as f:
        f.write(output)
    use_md = True
    use_chinese = True
    if use_md:
        system_md = 'You are a translator between LaTeX and Markdown (.md). Here is a LaTeX file whose content is: \n \n ' + output
        prompts_md = 'You should transfer the LaTeX content to the .md format carefully, and pay attention to the correctness of the citation format (use the number). You should directly output the new content without any other reply. You should add the reference papers at the end of the paper, and add line breaks between two reference papers. The title should be ' + paper['title']
        output_md, usage_md = get_gpt_responses(system_md, prompts_md,
                                          model=model, temperature=0.4)
        md_file = os.path.join(save_to_path, f"{'survey'}.md")
        with open(md_file, "w", encoding="utf-8") as m:
            m.write(output_md)
        if use_chinese:
            system_md_chi = 'You are a translator between English and Chinese. Here is an English file whose content is: \n \n ' + output
            prompts_md_chi = 'You should translate the English into Chinese and not change anything else. You should directly output the new content without any other reply. You should keep the reference papers unchanged.'
            output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi,
                                                    model=model, temperature=0.4)
            md_file_chi = os.path.join(save_to_path, f"{'survey_chinese'}.md")
            with open(md_file_chi, "w", encoding="utf-8") as c:
                c.write(output_md_chi)
    return usage
def generate_draft(title,  tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,
                   knowledge_database=None, max_tokens_kd=2048, query_counts=10,
                   section='related works', model="gpt-3.5-turbo-16k", template="Default"
                   , save_zip=None):
    print("================START================")
    paper, destination_folder, _ = _generation_setup(title,  template, tldr, max_kw_refs, bib_refs,
                                                     max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd,
                                                     query_counts=query_counts,
                                                     knowledge_database=knowledge_database)
    # main components
    print(f"================PROCESSING================")
    usage = section_generation(paper, section, destination_folder, model=model)
    log_usage(usage, section)
    create_copies(destination_folder)
    print("\nPROCESSING COMPLETE\n")
    print("draft has been generated in " + destination_folder)
    return make_archive(destination_folder, title + ".zip")
if __name__ == "__main__":
    import openai
    openai.api_key = "your key"
    openai.api_base = 'https://api.openai.com/v1'
    
    # openai.proxy = "socks5h://localhost:7890"  # if using a VPN
    target_title = "Reinforcement Learning for Robot Control"
    generate_draft(target_title, knowledge_database="ml_textbook_test",max_kw_refs=20)
 | 
	[
  "langchain.vectorstores.FAISS.load_local",
  "langchain.PromptTemplate"
] | 
	[((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n    output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n    'section', 'references'], template=fundamental_subprompt +\n    instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n    'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n    )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')] | 
| 
	import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural'))
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
AUDIO_CHATGPT_PREFIX = """AudioGPT
AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files. 
AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated.
Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
TOOLS:
------
AudioGPT has access to the following tools:"""
AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists.
You will remember to provide the audio file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Thought: Do I need to use a tool? {agent_scratchpad}"""
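# Trim the agent's chat history to roughly the last keep_last_n_words whitespace-separated tokens, dropping whole paragraphs from the front.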
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    else:
        paragraphs = history_memory.split('\n')
        last_n_tokens = n_tokens
        while last_n_tokens >= keep_last_n_words:
            last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
            paragraphs = paragraphs[1:]
        return '\n' + '\n'.join(paragraphs)
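# Concatenate two wav files end to end and write the result to audio/ under a random file name.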
def merge_audio(audio_path_1, audio_path_2):
    merged_signal = []
    sr_1, signal_1 = wavfile.read(audio_path_1)
    sr_2, signal_2 = wavfile.read(audio_path_2)
    merged_signal.append(signal_1)
    merged_signal.append(signal_2)
    merged_signal = np.hstack(merged_signal)
    merged_signal = np.asarray(merged_signal, dtype=np.int16)
    audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
    wavfile.write(audio_filename, sr_2, merged_signal)
    return audio_filename
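# Text-to-image: Stable Diffusion v1.5 with a GPT-2 (MagicPrompt) prompt refiner.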
class T2I:
    def __init__(self, device):
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from diffusers import StableDiffusionPipeline
        from transformers import pipeline
        print("Initializing T2I to %s" % device)
        self.device = device
        self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
        self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
        self.pipe.to(device)
    def inference(self, text):
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
        print(f'{text} refined to {refined_text}')
        image = self.pipe(refined_text).images[0]
        image.save(image_filename)
        print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
        return image_filename
class ImageCaptioning:
    def __init__(self, device):
        from transformers import BlipProcessor, BlipForConditionalGeneration
        print("Initializing ImageCaptioning to %s" % device)
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        return captions
class T2A:
    def __init__(self, device):
        print("Initializing Make-An-Audio to %s" % device)
        self.device = device
        self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device)
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
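    # Load the latent diffusion model from its config and checkpoint, then wrap it in a DDIM sampler.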
    def _initialize_model(self, config, ckpt, device):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        model = model.to(device)
        model.cond_stage_model.to(model.device)
        model.cond_stage_model.device = model.device
        sampler = DDIMSampler(model)
        return sampler
    def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
        SAMPLE_RATE = 16000
        prng = np.random.RandomState(seed)
        start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
        c = self.sampler.model.get_learned_conditioning(n_samples * [text])
        shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8]  # (z_dim, 80//2^x, 848//2^x)
        samples_ddim, _ = self.sampler.sample(S = ddim_steps,
                                            conditioning = c,
                                            batch_size = n_samples,
                                            shape = shape,
                                            verbose = False,
                                            unconditional_guidance_scale = scale,
                                            unconditional_conditioning = uc,
                                            x_T = start_code)
        x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
        x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
        wav_list = []
        for idx,spec in enumerate(x_samples_ddim):
            wav = self.vocoder.vocode(spec)
            wav_list.append((SAMPLE_RATE,wav))
        best_wav = self.select_best_audio(text, wav_list)
        return best_wav
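    # Score each candidate waveform against the prompt with CLAP and return the highest-scoring one.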
    def select_best_audio(self, prompt, wav_list):
        from wav_evaluation.models.CLAPWrapper import CLAPWrapper
        clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml',
                                 use_cuda=torch.cuda.is_available())
        text_embeddings = clap_model.get_text_embeddings([prompt])
        score_list = []
        for data in wav_list:
            sr, wav = data
            audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True)
            score = clap_model.compute_similarity(audio_embeddings, text_embeddings,
                                                  use_logit_scale=False).squeeze().cpu().numpy()
            score_list.append(score)
        max_index = np.array(score_list).argmax()
        print(score_list, max_index)
        return wav_list[max_index]
    def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
        melbins,mel_len = 80,624
        with torch.no_grad():
            result = self.txt2audio(
                text = text,
                H = melbins,
                W = mel_len
            )
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed T2A.run, text: {text}, audio_filename: {audio_filename}")
        return audio_filename
class I2A:
    def __init__(self, device):
        print("Initializing Make-An-Audio-Image to %s" % device)
        self.device = device
        self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device)
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
    def _initialize_model(self, config, ckpt, device):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        model = model.to(device)
        model.cond_stage_model.to(model.device)
        model.cond_stage_model.device = model.device
        sampler = DDIMSampler(model)
        return sampler
    def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
        SAMPLE_RATE = 16000
        n_samples = 1 # only support 1 sample
        prng = np.random.RandomState(seed)
        start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
        #image = Image.fromarray(image)
        image = Image.open(image)
        image = self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0)
        image_embedding = self.sampler.model.cond_stage_model.forward_img(image)
        c = image_embedding.repeat(n_samples, 1, 1)
        shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8]  # (z_dim, H//2^x, W//2^x)
        samples_ddim, _ = self.sampler.sample(S=ddim_steps,
                                            conditioning=c,
                                            batch_size=n_samples,
                                            shape=shape,
                                            verbose=False,
                                            unconditional_guidance_scale=scale,
                                            unconditional_conditioning=uc,
                                            x_T=start_code)
        x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
        x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
        wav_list = []
        for idx,spec in enumerate(x_samples_ddim):
            wav = self.vocoder.vocode(spec)
            wav_list.append((SAMPLE_RATE,wav))
        best_wav = wav_list[0]
        return best_wav
    def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
        melbins,mel_len = 80,624
        with torch.no_grad():
            result = self.img2audio(
                image=image,
                H=melbins,
                W=mel_len
            )
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed I2a.run, image_filename: {image}, audio_filename: {audio_filename}")
        return audio_filename
class TTS:
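    """Text-to-speech synthesis with PortaSpeech (22.05 kHz output)."""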
    def __init__(self, device=None):
        from inference.tts.PortaSpeech import TTSInference
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing PortaSpeech to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/ps_adv_baseline'
        self.set_model_hparams()
        self.inferencer = TTSInference(self.hp, device)
    def set_model_hparams(self):
        set_hparams(exp_name=self.exp_name, print_hparams=False)
        self.hp = hp
    def inference(self, text):
        self.set_model_hparams()
        inp = {"text": text}
        out = self.inferencer.infer_once(inp)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, out, samplerate=22050)
        return audio_filename
class T2S:
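    """Singing voice synthesis with DiffSinger (opencpop checkpoint): takes comma-separated text, notes and note durations."""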
    def __init__(self, device= None):
        from inference.svs.ds_e2e import DiffSingerE2EInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing DiffSinger to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/0831_opencpop_ds1000'
        self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml'
        self.set_model_hparams()
        self.pipe = DiffSingerE2EInfer(self.hp, device)
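        # Default Chinese lyric/note/duration sample, used as a fallback when the user input cannot be parsed.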
        self.default_inp = {
            'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
            'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
            'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
        }
    def set_model_hparams(self):
        set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
        self.hp = hp
    def inference(self, inputs):
        self.set_model_hparams()
        val = inputs.split(",")
        key = ['text', 'notes', 'notes_duration']
        try:
            inp = {k: v for k, v in zip(key, val)}
            wav = self.pipe.infer_once(inp)
        except Exception:
            print('Error occurred. Generating default audio sample.\n')
            inp = self.default_inp
            wav = self.pipe.infer_once(inp)
        #if inputs == '' or len(val) < len(key):
        #    inp = self.default_inp
        #else:
        #    inp = {k:v for k,v in zip(key,val)}
        #wav = self.pipe.infer_once(inp)
        wav *= 32767
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
        print(f"Processed T2S.run, audio_filename: {audio_filename}")
        return audio_filename
class t2s_VISinger:
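    """Singing voice synthesis with ESPnet VISinger (opencpop_visinger1 pretrained model)."""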
    def __init__(self, device=None):
        from espnet2.bin.svs_inference import SingingGenerate
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing VISingere to %s" % device)
        tag = 'AQuarterMile/opencpop_visinger1'
        self.model = SingingGenerate.from_pretrained(
            model_tag=str_or_none(tag),
            device=device,
        )
        phn_dur = [[0.        , 0.219     ],
            [0.219     , 0.50599998],
            [0.50599998, 0.71399999],
            [0.71399999, 1.097     ],
            [1.097     , 1.28799999],
            [1.28799999, 1.98300004],
            [1.98300004, 7.10500002],
            [7.10500002, 7.60400009]]
        phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP']
        score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']]
        tempo = 70
        tmp = {}
        tmp["label"] = phn_dur, phn
        tmp["score"] = tempo, score
        self.default_inp = tmp
    def inference(self, inputs):
        val = inputs.split(",")
        key = ['text', 'notes', 'notes_duration']
        try: # TODO: input will be updated
            inp = {k: v for k, v in zip(key, val)}
            wav = self.model(text=inp)["wav"]
        except Exception:
            print('Error occurred. Generating default audio sample.\n')
            inp = self.default_inp
            wav = self.model(text=inp)["wav"]
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, wav, samplerate=self.model.fs)
        return audio_filename
class TTS_OOD:
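    """Style-transfer text-to-speech with GenerSpeech: takes a comma-separated reference audio path and target text."""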
    def __init__(self, device):
        from inference.tts.GenerSpeech import GenerSpeechInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing GenerSpeech to %s" % device)
        self.device = device
        self.exp_name = 'checkpoints/GenerSpeech'
        self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml'
        self.set_model_hparams()
        self.pipe = GenerSpeechInfer(self.hp, device)
    def set_model_hparams(self):
        set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
        f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy'
        if os.path.exists(f0_stats_fn):
            hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn)
            hp['f0_mean'] = float(hp['f0_mean'])
            hp['f0_std'] = float(hp['f0_std'])
        hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt'
        self.hp = hp
    def inference(self, inputs):
        self.set_model_hparams()
        key = ['ref_audio', 'text']
        val = inputs.split(",")
        inp = {k: v for k, v in zip(key, val)}
        wav = self.pipe.infer_once(inp)
        wav *= 32767
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
        print(
            f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. Output Audio_filename: {audio_filename}")
        return audio_filename
class Inpaint:
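    """Audio inpainting in the mel-spectrogram domain with Make-An-Audio; the inpainted mel is rendered back to a waveform with the BigVGAN vocoder."""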
    def __init__(self, device):
        print("Initializing Make-An-Audio-inpaint to %s" % device)
        self.device = device
        self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt')
        self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
        self.cmap_transform = matplotlib.cm.viridis
    def _initialize_model_inpaint(self, config, ckpt):
        config = OmegaConf.load(config)
        model = instantiate_from_config(config.model)
        model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        model = model.to(device)
        print(model.device, device, model.cond_stage_model.device)
        sampler = DDIMSampler(model)
        return sampler
    def make_batch_sd(self, mel, mask, num_samples=1):
        mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32)
        mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32)
        masked_mel = (1 - mask) * mel
        mel = mel * 2 - 1
        mask = mask * 2 - 1
        masked_mel = masked_mel * 2 - 1
        batch = {
             "mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
             "mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples),
             "masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
        }
        return batch
    def gen_mel(self, input_audio_path):
        SAMPLE_RATE = 16000
        sr, ori_wav = wavfile.read(input_audio_path)
        print("gen_mel")
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
        if len(ori_wav.shape)==2:# stereo
            ori_wav = librosa.to_mono(ori_wav.T)
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
        mel_len,hop_size = 848,256
        input_len = mel_len * hop_size
        if len(ori_wav) < input_len:
            input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
        else:
            input_wav = ori_wav[:input_len]
        mel = TRANSFORMS_16000(input_wav)
        return mel
    def gen_mel_audio(self, input_audio):
        SAMPLE_RATE = 16000
        sr,ori_wav = input_audio
        print("gen_mel_audio")
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
        if len(ori_wav.shape)==2:# stereo
            ori_wav = librosa.to_mono(ori_wav.T)
        print(sr,ori_wav.shape,ori_wav)
        ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
        mel_len,hop_size = 848,256
        input_len = mel_len * hop_size
        if len(ori_wav) < input_len:
            input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
        else:
            input_wav = ori_wav[:input_len]
        mel = TRANSFORMS_16000(input_wav)
        return mel
    def show_mel_fn(self, input_audio_path):
        crop_len = 500
        crop_mel = self.gen_mel(input_audio_path)[:,:crop_len]
        color_mel = self.cmap_transform(crop_mel)
        image = Image.fromarray((color_mel*255).astype(np.uint8))
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        image.save(image_filename)
        return image_filename
    def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
        model = self.sampler.model
        prng = np.random.RandomState(seed)
        start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
        start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
        c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
        cc = torch.nn.functional.interpolate(batch["mask"],
                                                size=c.shape[-2:])
        c = torch.cat((c, cc), dim=1) # (b,c+1,h,w) 1 is mask
        shape = (c.shape[1]-1,)+c.shape[2:]
        samples_ddim, _ = self.sampler.sample(S=ddim_steps,
                                            conditioning=c,
                                            batch_size=c.shape[0],
                                            shape=shape,
                                            verbose=False)
        x_samples_ddim = model.decode_first_stage(samples_ddim)
        mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0)
        mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0)
        predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0)
        inpainted = (1-mask)*mel+mask*predicted_mel
        inpainted = inpainted.cpu().numpy().squeeze()
        inpaint_wav = self.vocoder.vocode(inpainted)
        return inpainted, inpaint_wav
    def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100):
        SAMPLE_RATE = 16000
        torch.set_grad_enabled(False)
        mel_img = Image.open(mel_and_mask['image'])
        mask_img = Image.open(mel_and_mask["mask"])
        show_mel = np.array(mel_img.convert("L"))/255
        mask = np.array(mask_img.convert("L"))/255
        mel_bins,mel_len = 80,848
        input_mel = self.gen_mel_audio(input_audio)[:,:mel_len]
        mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0)
        print(mask.shape,input_mel.shape)
        with torch.no_grad():
            batch = self.make_batch_sd(input_mel,mask,num_samples=1)
            inpainted,gen_wav = self.inpaint(
                batch=batch,
                seed=seed,
                ddim_steps=ddim_steps,
                num_samples=1,
                H=mel_bins, W=mel_len
            )
        inpainted = inpainted[:,:show_mel.shape[1]]
        color_mel = self.cmap_transform(inpainted)
        input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
        gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
        image = Image.fromarray((color_mel*255).astype(np.uint8))
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        image.save(image_filename)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        soundfile.write(audio_filename, gen_wav, samplerate = 16000)
        return image_filename, audio_filename
    
class ASR:
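    """Speech recognition with OpenAI Whisper (base model); also exposes an English transcription helper."""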
    def __init__(self, device):
        print("Initializing Whisper to %s" % device)
        self.device = device
        self.model = whisper.load_model("base", device=device)
    def inference(self, audio_path):
        audio = whisper.load_audio(audio_path)
        audio = whisper.pad_or_trim(audio)
        mel = whisper.log_mel_spectrogram(audio).to(self.device)
        _, probs = self.model.detect_language(mel)
        options = whisper.DecodingOptions()
        result = whisper.decode(self.model, mel, options)
        return result.text
    def translate_english(self, audio_path):
        audio = self.model.transcribe(audio_path, language='English')
        return audio['text']
class A2T:
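    """Audio captioning: generates a text description of an input audio clip."""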
    def __init__(self, device):
        from audio_to_text.inference_waveform import AudioCapModel
        print("Initializing Audio-To-Text Model to %s" % device)
        self.device = device
        self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm")
    def inference(self, audio_path):
        audio = whisper.load_audio(audio_path)
        caption_text = self.model(audio)
        return caption_text[0]
class GeneFace:
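    """Audio-driven talking-portrait video generation with GeneFace."""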
    def __init__(self, device=None):
        print("Initializing GeneFace model to %s" % device)
        from audio_to_face.GeneFace_binding import GeneFaceInfer
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.geneface_model = GeneFaceInfer(device)
        print("Loaded GeneFace model")
    def inference(self, audio_path):
        audio_base_name = os.path.basename(audio_path)[:-4]
        out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4")
        inp = {
            'audio_source_name': audio_path,
            'out_npy_name': f'geneface/tmp/{audio_base_name}.npy',
            'cond_name': f'geneface/tmp/{audio_base_name}.npy',
            'out_video_name': out_video_name,
            'tmp_imgs_dir': f'video/tmp_imgs',
        }
        self.geneface_model.infer_once(inp)
        return out_video_name
class SoundDetection:
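    """Sound event detection with a PVT model (32 kHz input); plots the log spectrogram and the top predicted events over time and returns the image path."""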
    def __init__(self, device):
        self.device = device
        self.sample_rate = 32000
        self.window_size = 1024
        self.hop_size = 320
        self.mel_bins = 64
        self.fmin = 50
        self.fmax = 14000
        self.model_type = 'PVT'
        self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth'
        self.classes_num = detection_config.classes_num
        self.labels = detection_config.labels
        self.frames_per_second = self.sample_rate // self.hop_size
        # Model = eval(self.model_type)
        self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size, 
            hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax, 
            classes_num=self.classes_num)
        checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])
        self.model.to(device)
    def inference(self, audio_path):
        # Forward
        (waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True)
        waveform = waveform[None, :]    # (1, audio_length)
        waveform = torch.from_numpy(waveform)
        waveform = waveform.to(self.device)
        # Forward
        with torch.no_grad():
            self.model.eval()
            batch_output_dict = self.model(waveform, None)
        framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
        """(time_steps, classes_num)"""
        # print('Sound event detection result (time_steps x classes_num): {}'.format(
        #     framewise_output.shape))
        import numpy as np
        import matplotlib.pyplot as plt
        sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
        top_k = 10  # Show top results
        top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]    
        """(time_steps, top_k)"""
        # Plot result    
        stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size, 
            hop_length=self.hop_size, window='hann', center=True)
        frames_num = stft.shape[-1]
        fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
        axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
        axs[0].set_ylabel('Frequency bins')
        axs[0].set_title('Log spectrogram')
        axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
        axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second))
        axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second))
        axs[1].yaxis.set_ticks(np.arange(0, top_k))
        axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]])
        axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
        axs[1].set_xlabel('Seconds')
        axs[1].xaxis.set_ticks_position('bottom')
        plt.tight_layout()
        image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
        plt.savefig(image_filename)
        return image_filename
class SoundExtraction:
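    """Language-queried target sound extraction with LASSNet: takes a comma-separated audio path and text description of the target sound."""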
    def __init__(self, device):
        from sound_extraction.model.LASSNet import LASSNet
        from sound_extraction.utils.stft import STFT
        import torch.nn as nn
        self.device = device
        self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt'
        self.stft = STFT()
        self.model = nn.DataParallel(LASSNet(device)).to(device)
        checkpoint = torch.load(self.model_file)
        self.model.load_state_dict(checkpoint['model'])
        self.model.eval()
    def inference(self, inputs):
        #key = ['ref_audio', 'text']
        from sound_extraction.utils.wav_io import load_wav, save_wav
        val = inputs.split(",")
        audio_path = val[0] # audio_path, text
        text = val[1]
        waveform = load_wav(audio_path)
        waveform = torch.tensor(waveform).transpose(1,0)
        mixed_mag, mixed_phase = self.stft.transform(waveform)
        text_query = ['[CLS] ' + text]
        mixed_mag = mixed_mag.transpose(2,1).unsqueeze(0).to(self.device)
        est_mask = self.model(mixed_mag, text_query)
        est_mag = est_mask * mixed_mag  
        est_mag = est_mag.squeeze(1)  
        est_mag = est_mag.permute(0, 2, 1) 
        est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase)
        est_wav = est_wav.squeeze(0).squeeze(0).numpy()  
        #est_path = f'output/est{i}.wav'
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        print('audio_filename ', audio_filename)
        save_wav(est_wav, audio_filename)
        return audio_filename
class Binaural:
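    """Mono-to-binaural audio rendering with a BinauralNetwork and a randomly chosen tx_positions trajectory (48 kHz)."""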
    def __init__(self, device):
        from src.models import BinauralNetwork
        self.device = device
        self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net'
        self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions2.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions3.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions4.txt',
                              'mono2binaural/useful_ckpts/m2b/tx_positions5.txt']
        self.net = BinauralNetwork(view_dim=7,
                      warpnet_layers=4,
                      warpnet_channels=64,
                      )
        self.net.load_from_file(self.model_file)
        self.sr = 48000
    def inference(self, audio_path):
        mono, sr  = librosa.load(path=audio_path, sr=self.sr, mono=True)
        mono = torch.from_numpy(mono)
        mono = mono.unsqueeze(0)
        import numpy as np
        import random
        rand_int = random.randint(0,4)
        view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32)
        view = torch.from_numpy(view)
        if not view.shape[-1] * 400 == mono.shape[-1]:
            # trim mono to a multiple of 400 samples so it aligns with the view positions
            mono = mono[:, :(mono.shape[-1]//400)*400]
            if view.shape[1]*400 > mono.shape[1]:
                # pick a random window of view positions matching the trimmed mono length
                m_a = view.shape[1] - mono.shape[-1]//400
                rand_st = random.randint(0, m_a)
                view = view[:, rand_st:rand_st+(mono.shape[-1]//400)]
        # binauralize and save output
        self.net.eval().to(self.device)
        mono, view = mono.to(self.device), view.to(self.device)
        chunk_size = 48000  # forward in chunks of 1s
        rec_field =  1000  # add 1000 samples as "safe bet" since warping has undefined rec. field
        rec_field -= rec_field % 400  # make sure rec_field is a multiple of 400 to match audio and view frequencies
        chunks = [
            {
                "mono": mono[:, max(0, i-rec_field):i+chunk_size],
                "view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
            }
            for i in range(0, mono.shape[-1], chunk_size)
        ]
        for i, chunk in enumerate(chunks):
            with torch.no_grad():
                mono = chunk["mono"].unsqueeze(0)
                view = chunk["view"].unsqueeze(0)
                binaural = self.net(mono, view).squeeze(0)
                if i > 0:
                    binaural = binaural[:, -(mono.shape[-1]-rec_field):]
                chunk["binaural"] = binaural
        binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1)
        binaural = torch.clamp(binaural, min=-1, max=1).cpu()
        #binaural = chunked_forwarding(net, mono, view)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        import torchaudio
        torchaudio.save(audio_filename, binaural, sr)
        #soundfile.write(audio_filename, binaural, samplerate = 48000)
        print(f"Processed Binaural.run, audio_filename: {audio_filename}")
        return audio_filename
class TargetSoundDetection:
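    """Target sound detection: matches a CLIP text embedding of the query against stored reference embeddings, then localizes the selected event in the audio and returns its segment timestamps."""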
    def __init__(self, device):
        from target_sound_detection.src import models as tsd_models
        from target_sound_detection.src.models import event_labels
        self.device = device
        self.MEL_ARGS = {
            'n_mels': 64,
            'n_fft': 2048,
            'hop_length': int(22050 * 20 / 1000),
            'win_length': int(22050 * 40 / 1000)
        }
        self.EPS = np.spacing(1)
        self.clip_model, _ = clip.load("ViT-B/32", device=self.device)
        self.event_labels = event_labels
        self.id_to_event =  {i : label for i, label in enumerate(self.event_labels)}
        config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu')
        config_parameters = dict(config)
        config_parameters['tao'] = 0.6
        if 'thres' not in config_parameters.keys():
            config_parameters['thres'] = 0.5
        if 'time_resolution' not in config_parameters.keys():
            config_parameters['time_resolution'] = 125
        model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'
                                        , map_location=lambda storage, loc: storage) # load parameter 
        self.model = getattr(tsd_models, config_parameters['model'])(config_parameters,
                    inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args'])
        self.model.load_state_dict(model_parameters)
        self.model = self.model.to(self.device).eval()
        self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')
        self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')
    def extract_feature(self, fname):
        import soundfile as sf
        y, sr = sf.read(fname, dtype='float32')
        print('y ', y.shape)
        ti = y.shape[0]/sr
        if y.ndim > 1:
            y = y.mean(1)
        y = librosa.resample(y, sr, 22050)
        lms_feature = np.log(librosa.feature.melspectrogram(y, **self.MEL_ARGS) + self.EPS).T
        return lms_feature,ti
    
    def build_clip(self, text):
        text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"]
        text_features = self.clip_model.encode_text(text)
        return text_features
    
    def cal_similarity(self, target, retrievals):
        ans = []
        #target =torch.from_numpy(target)
        for name in retrievals.keys():
            tmp = retrievals[name]
            #tmp = torch.from_numpy(tmp)
            s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0)
            ans.append(s.item())
        return ans.index(max(ans))
    
    def inference(self, text, audio_path):
        from target_sound_detection.src.utils import median_filter, decode_with_timestamps
        target_emb = self.build_clip(text) # torch type
        idx = self.cal_similarity(target_emb, self.re_embeds)
        target_event = self.id_to_event[idx]
        embedding = self.ref_mel[target_event]
        embedding = torch.from_numpy(embedding)
        embedding = embedding.unsqueeze(0).to(self.device).float()
        #print('embedding ', embedding.shape)
        inputs,ti = self.extract_feature(audio_path)
        #print('ti ', ti)
        inputs = torch.from_numpy(inputs)
        inputs = inputs.unsqueeze(0).to(self.device).float()
        #print('inputs ', inputs.shape)
        decision, decision_up, logit = self.model(inputs, embedding)
        pred = decision_up.detach().cpu().numpy()
        pred = pred[:,:,0]
        frame_num = decision_up.shape[1]
        time_ratio = ti / frame_num
        filtered_pred = median_filter(pred, window_size=1, threshold=0.5)
        #print('filtered_pred ', filtered_pred)
        time_predictions = []
        for index_k in range(filtered_pred.shape[0]):
            decoded_pred = []
            decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:])
            if len(decoded_pred_) == 0: # neg deal
                decoded_pred_.append((target_event, 0, 0))
            decoded_pred.append(decoded_pred_)
            for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
                cur_pred = pred[num_batch]
                # Save each frame output, for later visualization
                label_prediction = decoded_pred[num_batch] # frame predict
                # print(label_prediction)
                for event_label, onset, offset in label_prediction:
                    time_predictions.append({
                        'onset': onset*time_ratio,
                        'offset': offset*time_ratio,})
        ans = ''
        for i,item in enumerate(time_predictions):
            ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + '  end_time: ' + str(item['offset']) + '\t'
        #print(ans)
        return ans
# class Speech_Enh_SS_SC:
#     """Speech Enhancement or Separation in single-channel
#     Example usage:
#         enh_model = Speech_Enh_SS("cuda")
#         enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
#     """
#     def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"):
#         self.model_name = model_name
#         self.device = device
#         print("Initializing ESPnet Enh to %s" % device)
#         self._initialize_model()
#     def _initialize_model(self):
#         from espnet_model_zoo.downloader import ModelDownloader
#         from espnet2.bin.enh_inference import SeparateSpeech
#         d = ModelDownloader()
#         cfg = d.download_and_unpack(self.model_name)
#         self.separate_speech = SeparateSpeech(
#             train_config=cfg["train_config"],
#             model_file=cfg["model_file"],
#             # for segment-wise process on long speech
#             segment_size=2.4,
#             hop_size=0.8,
#             normalize_segment_scale=False,
#             show_progressbar=True,
#             ref_channel=None,
#             normalize_output_wav=True,
#             device=self.device,
#         )
#     def inference(self, speech_path, ref_channel=0):
#         speech, sr = soundfile.read(speech_path)
#         speech = speech[:, ref_channel]
#         assert speech.dim() == 1
#         enh_speech = self.separate_speech(speech[None, ], fs=sr)
#         if len(enh_speech) == 1:
#             return enh_speech[0]
#         return enh_speech
# class Speech_Enh_SS_MC:
#     """Speech Enhancement or Separation in multi-channel"""
#     def __init__(self, device="cuda", model_name=None, ref_channel=4):
#         self.model_name = model_name
#         self.ref_channel = ref_channel
#         self.device = device
#         print("Initializing ESPnet Enh to %s" % device)
#         self._initialize_model()
#     def _initialize_model(self):
#         from espnet_model_zoo.downloader import ModelDownloader
#         from espnet2.bin.enh_inference import SeparateSpeech
#         d = ModelDownloader()
#         cfg = d.download_and_unpack(self.model_name)
#         self.separate_speech = SeparateSpeech(
#             train_config=cfg["train_config"],
#             model_file=cfg["model_file"],
#             # for segment-wise process on long speech
#             segment_size=2.4,
#             hop_size=0.8,
#             normalize_segment_scale=False,
#             show_progressbar=True,
#             ref_channel=self.ref_channel,
#             normalize_output_wav=True,
#             device=self.device,
#         )
#     def inference(self, speech_path):
#         speech, sr = soundfile.read(speech_path)
#         speech = speech.T
#         enh_speech = self.separate_speech(speech[None, ...], fs=sr)
#         if len(enh_speech) == 1:
#             return enh_speech[0]
#         return enh_speech
class Speech_Enh_SS_SC:
    """Speech Enhancement or Separation in single-channel
    Example usage:
        enh_model = Speech_Enh_SS("cuda")
        enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
    """
    def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"):
        self.model_name = model_name
        self.device = device
        print("Initializing ESPnet Enh to %s" % device)
        self._initialize_model()
    def _initialize_model(self):
        from espnet_model_zoo.downloader import ModelDownloader
        from espnet2.bin.enh_inference import SeparateSpeech
        d = ModelDownloader()
        cfg = d.download_and_unpack(self.model_name)
        self.separate_speech = SeparateSpeech(
            train_config=cfg["train_config"],
            model_file=cfg["model_file"],
            # for segment-wise process on long speech
            segment_size=2.4,
            hop_size=0.8,
            normalize_segment_scale=False,
            show_progressbar=True,
            ref_channel=None,
            normalize_output_wav=True,
            device=self.device,
        )
    def inference(self, speech_path, ref_channel=0):
        speech, sr = soundfile.read(speech_path)
        speech = speech[:, ref_channel]
        # speech = torch.from_numpy(speech)
        # assert speech.dim() == 1
        enh_speech = self.separate_speech(speech[None, ...], fs=sr)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        # if len(enh_speech) == 1:
        soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr)
            # return enh_speech[0]
        # return enh_speech
        # else: 
        #     print("############")
        #     audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        #     soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
        #     audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        #     soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
        #     audio_filename = merge_audio(audio_filename_1, audio_filename_2)
        return audio_filename
class Speech_SS:
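    """Speech separation with an ESPnet SeparateSpeech model; multiple separated speakers are merged into a single output file."""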
    def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"):
        self.model_name = model_name
        self.device = device
        print("Initializing ESPnet SS to %s" % device)
        self._initialize_model()
    def _initialize_model(self):
        from espnet_model_zoo.downloader import ModelDownloader
        from espnet2.bin.enh_inference import SeparateSpeech
        d = ModelDownloader()
        cfg = d.download_and_unpack(self.model_name)
        self.separate_speech = SeparateSpeech(
            train_config=cfg["train_config"],
            model_file=cfg["model_file"],
            # for segment-wise process on long speech
            segment_size=2.4,
            hop_size=0.8,
            normalize_segment_scale=False,
            show_progressbar=True,
            ref_channel=None,
            normalize_output_wav=True,
            device=self.device,
        )
    def inference(self, speech_path):
        speech, sr = soundfile.read(speech_path)
        enh_speech = self.separate_speech(speech[None, ...], fs=sr)
        audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        if len(enh_speech) == 1:
            soundfile.write(audio_filename, enh_speech[0], samplerate=sr)
        else:
            # print("############")
            audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
            audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
            audio_filename = merge_audio(audio_filename_1, audio_filename_2)
        return audio_filename
class ConversationBot:
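    """Wires the audio tools into a LangChain conversational-react-description agent with conversation-buffer memory and Gradio-facing helpers."""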
    def __init__(self):
        print("Initializing AudioGPT")
        self.llm = OpenAI(temperature=0)
        self.t2i = T2I(device="cuda:1")
        self.i2t = ImageCaptioning(device="cuda:0")
        self.t2a = T2A(device="cuda:0")
        self.tts = TTS(device="cpu")
        self.t2s = T2S(device="cpu")
        self.i2a = I2A(device="cuda:0")
        self.a2t = A2T(device="cpu")
        self.asr = ASR(device="cuda:0")
        self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0")
        # self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0")
        self.SS = Speech_SS(device="cuda:0")
        self.inpaint = Inpaint(device="cuda:0")
        self.tts_ood = TTS_OOD(device="cpu")
        self.geneface = GeneFace(device="cuda:0")
        self.detection = SoundDetection(device="cpu")
        self.binaural = Binaural(device="cuda:0")
        self.extraction = SoundExtraction(device="cuda:0")
        self.TSD = TargetSoundDetection(device="cuda:0")
        self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
    def init_tools(self, interaction_type):
        if interaction_type == 'text':
            self.tools = [
                Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
                     description="useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
                                 "The input to this tool should be a string, representing the text used to generate image. "),
                Tool(name="Get Photo Description", func=self.i2t.inference,
                     description="useful for when you want to know what is inside the photo. receives image_path as input. "
                                 "The input to this tool should be a string, representing the image_path. "),
                Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                     description="useful for when you want to generate an audio from a user input text and it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to generate audio."),
                Tool(
                    name="Style Transfer", func= self.tts_ood.inference,
                    description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                                "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                                "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
                Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                     description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                                 "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
                                 "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                                 "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                                 "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
                Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                     description="useful for when you want to convert a user input text into speech audio it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to be converted to speech."),
                # Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference,
                #      description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), "
                #                  "or separate each speech from the speech mixture (single-channel), receives audio_path as input."
                #                  "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference,
                     description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Speech Separation In Single-Channel", func=self.SS.inference,
                     description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                # Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference,
                #      description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input."
                #                  "The input to this tool should be a string, representing the audio_path."),                                 
                Tool(name="Generate Audio From The Image", func=self.i2a.inference,
                     description="useful for when you want to generate an audio based on an image."
                                  "The input to this tool should be a string, representing the image_path. "),
                Tool(name="Generate Text From The Audio", func=self.a2t.inference,
                     description="useful for when you want to describe an audio in text, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn,
                     description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, "
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Transcribe Speech", func=self.asr.inference,
                     description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                     description="useful for when you want to generate a talking human portrait video given a input audio."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference,
                     description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Sythesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
                     description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                     description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
                                 "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
                Tool(name="Target Sound Detection", func=self.TSD.inference,
                     description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                                 "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]       
            self.agent = initialize_agent(
                self.tools,
                self.llm,
                agent="conversational-react-description",
                verbose=True,
                memory=self.memory,
                return_intermediate_steps=True,
                agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
            return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
        else:
            self.tools = [
                Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                     description="useful for when you want to generate an audio from a user input text and it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to generate audio."),
                Tool(
                    name="Style Transfer", func= self.tts_ood.inference,
                    description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                                "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                                "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
                Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                     description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                                 "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
                                 "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                                 "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                                 "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
                Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                     description="useful for when you want to convert a user input text into speech audio it saved it to a file."
                                 "The input to this tool should be a string, representing the text used to be converted to speech."),
                Tool(name="Generate Text From The Audio", func=self.a2t.inference,
                     description="useful for when you want to describe an audio in text, receives audio_path as input."
                                 "The input to this tool should be a string, representing the audio_path."), 
                Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                     description="useful for when you want to generate a talking human portrait video given a input audio."
                                 "The input to this tool should be a string, representing the audio_path."),
                Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
                     description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
                                 "The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                     description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
                                 "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
                Tool(name="Target Sound Detection", func=self.TSD.inference,
                     description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                                 "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]                
            self.agent = initialize_agent(
                self.tools,
                self.llm,
                agent="conversational-react-description",
                verbose=True,
                memory=self.memory,
                return_intermediate_steps=True,
                agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
            return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
    def run_text(self, text, state):
        print("===============Running run_text =============")
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        if res['intermediate_steps'] == []:
            print("======>Current memory:\n %s" % self.agent.memory)
            response = res['output']
            state = state + [(text, response)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
        else:
            tool = res['intermediate_steps'][0][0].tool
            if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
                print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
            elif tool == "Transcribe Speech":
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
            elif tool == "Detect The Sound Event From The Audio":
                image_filename = res['intermediate_steps'][0][1]
                response = res['output'] + f"*{image_filename}*"
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)       
            elif tool == "Audio Inpainting":
                audio_filename = res['intermediate_steps'][0][0].tool_input
                image_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output'] 
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            state = state + [(text, response)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
    def run_image_or_audio(self, file, state, txt):
        file_type = file.name[-3:]
        if file_type == "wav":
            print("===============Running run_audio =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            # audio_load = whisper.load_audio(file.name)
            audio_load, sr = soundfile.read(file.name)
            soundfile.write(audio_filename, audio_load, samplerate = sr)
            description = self.a2t.inference(audio_filename)
            Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
            AI_prompt = "Received.  "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            #state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
            state = state + [(f"*{audio_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
        else:
            print("===============Running run_image =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
            print("======>Auto Resize Image...")
            img = Image.open(file.name)
            width, height = img.size
            ratio = min(512 / width, 512 / height)
            width_new, height_new = (round(width * ratio), round(height * ratio))
            img = img.resize((width_new, height_new))
            img = img.convert('RGB')
            img.save(image_filename, "PNG")
            print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
            description = self.i2t.inference(image_filename)
            Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
            AI_prompt = "Received.  "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            state = state + [(f"*{image_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)
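    # Speech interaction: transcribe the spoken input, run the agent on the text,
    # and synthesize the textual response back to audio with TTS.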
    def speech(self, speech_input, state):
        input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        text = self.asr.translate_english(speech_input)
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        if res['intermediate_steps'] == []:
            print("======>Current memory:\n %s" % self.agent.memory)
            response = res['output']
            output_audio_filename = self.tts.inference(response)
            state = state + [(text, response)]
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
        else:
            tool = res['intermediate_steps'][0][0].tool
            if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
                print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Transcribe Speech":
                print("======>Current memory:\n %s" % self.agent.memory)
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Detect The Sound Event From The Audio":
                print("======>Current memory:\n %s" % self.agent.memory)
                image_filename = res['intermediate_steps'][0][1]
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output'] + f"*{image_filename}*"
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)   
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output'] 
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
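            # Default branch: merge a spoken announcement with the generated audio so
            # the user hears both the confirmation and the result.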
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            Res = "The audio file has been generated and the audio is "
            output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
            print(output_audio_filename)
            state = state + [(text, response)]
            response = res['output'] 
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
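    # Predict the masked region of the mel spectrogram and return the generated
    # audio together with the updated chat state.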
    def inpainting(self, state, audio_filename, image_filename):
        print("===============Running inpainting =============")
        print("Inputs:", state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)       
        AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"*{new_image_filename}*"
        output_audio_filename = self.tts.inference(AI_prompt)
        self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
        print("======>Current memory:\n %s" % self.agent.memory)
        state = state + [(f"Audio Inpainting", AI_prompt)]
        print("Outputs:", state)
        return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)
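    # Helpers used by the "Clear" buttons to reset individual Gradio components.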
    def clear_audio(self):
        return gr.Audio.update(value=None, visible=False)
    def clear_input_audio(self):
        return gr.Audio.update(value=None)
    def clear_image(self):
        return gr.Image.update(value=None, visible=False)
    def clear_video(self):
        return gr.Video.update(value=None, visible=False)
    def clear_button(self):
        return gr.Button.update(visible=False)
if __name__ == '__main__':
    bot = ConversationBot()
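    # Build the Gradio UI: a chatbot panel plus separate rows for text and speech
    # input; most components start hidden until an interaction type is selected.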
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        with gr.Row():
            gr.Markdown("## AudioGPT")
        chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False) 
        state = gr.State([])
        with gr.Row() as select_raws:
            with gr.Column(scale=0.7):
                interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
            with gr.Column(scale=0.3, min_width=0):
                select = gr.Button("Select")
        
        with gr.Row(visible=False) as text_input_raws:
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
            with gr.Column(scale=0.1, min_width=0):
                run = gr.Button("🏃♂️Run")
            with gr.Column(scale=0.1, min_width=0):
                clear_txt = gr.Button("🔄Clear️")
            with gr.Column(scale=0.1, min_width=0):
                btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
        with gr.Row():
            outaudio = gr.Audio(visible=False)
        with gr.Row():
            with gr.Column(scale=0.3, min_width=0):
                outvideo = gr.Video(visible=False)
        with gr.Row():
            show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
        with gr.Row():
            run_button = gr.Button("Predict Masked Place",visible=False)        
        with gr.Row(visible=False) as speech_input_raws: 
            with gr.Column(scale=0.7):
                speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
            with gr.Column(scale=0.15, min_width=0):
                submit_btn = gr.Button("🏃♂️Submit")
            with gr.Column(scale=0.15, min_width=0):
                clear_speech = gr.Button("🔄Clear️")
            with gr.Row():
                speech_output = gr.Audio(label="Output",visible=False)
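        # Wire the UI events to the corresponding bot methods.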
        select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
        txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        txt.submit(lambda: "", None, txt)
        run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        run.click(lambda: "", None, txt)
        btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
        run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
        clear_txt.click(bot.memory.clear)
        clear_txt.click(lambda: [], None, chatbot)
        clear_txt.click(lambda: [], None, state)
        clear_txt.click(lambda:None, None, txt)
        clear_txt.click(bot.clear_button, None, run_button)
        clear_txt.click(bot.clear_image, None, show_mel)
        clear_txt.click(bot.clear_audio, None, outaudio)
        clear_txt.click(bot.clear_video, None, outvideo)
        submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
        clear_speech.click(bot.clear_input_audio, None, speech_input)
        clear_speech.click(bot.clear_audio, None, speech_output)
        clear_speech.click(lambda: [], None, state)
        clear_speech.click(bot.clear_video, None, outvideo)
        demo.launch(server_name="0.0.0.0", server_port=7860, share=True) | 
	[
  "langchain.llms.openai.OpenAI",
  "langchain.agents.tools.Tool",
  "langchain.chains.conversation.memory.ConversationBufferMemory",
  "langchain.agents.initialize.initialize_agent"
] | 
	[((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'), ((4131, 4155), 'numpy.hstack', 'np.hstack', (['merged_signal'], {}), '(merged_signal)\n', (4140, 4155), True, 'import numpy as np\n'), ((4176, 4217), 'numpy.asarray', 'np.asarray', (['merged_signal'], {'dtype': 'np.int16'}), '(merged_signal, dtype=np.int16)\n', (4186, 4217), True, 'import numpy as np\n'), ((4298, 4348), 'scipy.io.wavfile.write', 'wavfile.write', (['audio_filename', 'sr_2', 'merged_signal'], {}), '(audio_filename, sr_2, merged_signal)\n', (4311, 4348), True, 'import scipy.io.wavfile as wavfile\n'), ((53, 79), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((4682, 4786), 'diffusers.StableDiffusionPipeline.from_pretrained', 'StableDiffusionPipeline.from_pretrained', (['"""runwayml/stable-diffusion-v1-5"""'], {'torch_dtype': 'torch.float16'}), "('runwayml/stable-diffusion-v1-5',\n    torch_dtype=torch.float16)\n", (4721, 4786), False, 'from diffusers import StableDiffusionPipeline\n'), ((4820, 4892), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4849, 4892), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((4926, 5005), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4962, 5005), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((5043, 5163), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n    text_refine_tokenizer, device=self.device)\n", (5051, 5163), False, 'from transformers import pipeline\n'), ((5875, 5945), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (5904, 5945), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6704, 6792), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (6718, 6792), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((6861, 6883), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (6875, 6883), False, 'from omegaconf import OmegaConf\n'), ((6900, 6937), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (6923, 6937), False, 'from ldm.util import instantiate_from_config\n'), ((7187, 7205), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (7198, 7205), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((7378, 7405), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7399, 7405), True, 'import numpy as np\n'), ((8501, 8560), 
'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (8512, 8560), False, 'import torch\n'), ((10157, 10217), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (10172, 10217), False, 'import soundfile\n'), ((10695, 10783), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (10709, 10783), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((10852, 10874), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (10866, 10874), False, 'from omegaconf import OmegaConf\n'), ((10891, 10928), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (10914, 10928), False, 'from ldm.util import instantiate_from_config\n'), ((11178, 11196), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (11189, 11196), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((11399, 11426), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (11420, 11426), True, 'import numpy as np\n'), ((11759, 11776), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (11769, 11776), False, 'from PIL import Image\n'), ((12720, 12779), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (12731, 12779), False, 'import torch\n'), ((13377, 13437), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (13392, 13437), False, 'import soundfile\n'), ((13967, 13996), 'inference.tts.PortaSpeech.TTSInference', 'TTSInference', (['self.hp', 'device'], {}), '(self.hp, device)\n', (13979, 13996), False, 'from inference.tts.PortaSpeech import TTSInference\n'), ((14039, 14095), 'utils.hparams.set_hparams', 'set_hparams', ([], {'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(exp_name=self.exp_name, print_hparams=False)\n', (14050, 14095), False, 'from utils.hparams import set_hparams\n'), ((14345, 14399), 'soundfile.write', 'soundfile.write', (['audio_filename', 'out'], {'samplerate': '(22050)'}), '(audio_filename, out, samplerate=22050)\n', (14360, 14399), False, 'import soundfile\n'), ((14913, 14948), 'inference.svs.ds_e2e.DiffSingerE2EInfer', 'DiffSingerE2EInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (14931, 14948), False, 'from inference.svs.ds_e2e import DiffSingerE2EInfer\n'), ((15398, 15474), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (15409, 15474), False, 'from utils.hparams import set_hparams\n'), ((18112, 18174), 'soundfile.write', 'soundfile.write', (['audio_filename', 'wav'], {'samplerate': 'self.model.fs'}), '(audio_filename, wav, samplerate=self.model.fs)\n', (18127, 18174), False, 'import soundfile\n'), ((18678, 18711), 'inference.tts.GenerSpeech.GenerSpeechInfer', 'GenerSpeechInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (18694, 18711), False, 'from inference.tts.GenerSpeech import 
GenerSpeechInfer\n'), ((18754, 18830), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (18765, 18830), False, 'from utils.hparams import set_hparams\n'), ((18914, 18941), 'os.path.exists', 'os.path.exists', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18928, 18941), False, 'import os\n'), ((20145, 20233), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n    device=device)\n", (20159, 20233), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((20354, 20376), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (20368, 20376), False, 'from omegaconf import OmegaConf\n'), ((20393, 20430), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (20416, 20430), False, 'from ldm.util import instantiate_from_config\n'), ((20737, 20755), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (20748, 20755), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((21544, 21574), 'scipy.io.wavfile.read', 'wavfile.read', (['input_audio_path'], {}), '(input_audio_path)\n', (21556, 21574), True, 'import scipy.io.wavfile as wavfile\n'), ((21855, 21915), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (21871, 21915), False, 'import librosa\n'), ((22182, 22209), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22198, 22209), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((22619, 22679), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (22635, 22679), False, 'import librosa\n'), ((22945, 22972), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22961, 22972), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((23512, 23539), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (23533, 23539), True, 'import numpy as np\n'), ((23834, 23899), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["batch['mask']"], {'size': 'c.shape[-2:]'}), "(batch['mask'], size=c.shape[-2:])\n", (23865, 23899), False, 'import torch\n'), ((23960, 23985), 'torch.cat', 'torch.cat', (['(c, cc)'], {'dim': '(1)'}), '((c, cc), dim=1)\n', (23969, 23985), False, 'import torch\n'), ((24438, 24495), 'torch.clamp', 'torch.clamp', (["((batch['mel'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mel'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24449, 24495), False, 'import torch\n'), ((24506, 24564), 'torch.clamp', 'torch.clamp', (["((batch['mask'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mask'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24517, 24564), False, 'import torch\n'), ((24584, 24643), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (24595, 24643), False, 'import torch\n'), ((24954, 24983), 'torch.set_grad_enabled', 'torch.set_grad_enabled', 
(['(False)'], {}), '(False)\n', (24976, 24983), False, 'import torch\n'), ((25002, 25035), 'PIL.Image.open', 'Image.open', (["mel_and_mask['image']"], {}), "(mel_and_mask['image'])\n", (25012, 25035), False, 'from PIL import Image\n'), ((25055, 25087), 'PIL.Image.open', 'Image.open', (["mel_and_mask['mask']"], {}), "(mel_and_mask['mask'])\n", (25065, 25087), False, 'from PIL import Image\n'), ((25306, 25398), 'numpy.pad', 'np.pad', (['mask', '((0, 0), (0, mel_len - mask.shape[1]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mask, ((0, 0), (0, mel_len - mask.shape[1])), mode='constant',\n    constant_values=0)\n", (25312, 25398), True, 'import numpy as np\n'), ((26269, 26327), 'soundfile.write', 'soundfile.write', (['audio_filename', 'gen_wav'], {'samplerate': '(16000)'}), '(audio_filename, gen_wav, samplerate=16000)\n', (26284, 26327), False, 'import soundfile\n'), ((26527, 26568), 'whisper.load_model', 'whisper.load_model', (['"""base"""'], {'device': 'device'}), "('base', device=device)\n", (26545, 26568), False, 'import whisper\n'), ((26623, 26653), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (26641, 26653), False, 'import whisper\n'), ((26670, 26696), 'whisper.pad_or_trim', 'whisper.pad_or_trim', (['audio'], {}), '(audio)\n', (26689, 26696), False, 'import whisper\n'), ((26831, 26856), 'whisper.DecodingOptions', 'whisper.DecodingOptions', ([], {}), '()\n', (26854, 26856), False, 'import whisper\n'), ((26874, 26914), 'whisper.decode', 'whisper.decode', (['self.model', 'mel', 'options'], {}), '(self.model, mel, options)\n', (26888, 26914), False, 'import whisper\n'), ((27312, 27373), 'audio_to_text.inference_waveform.AudioCapModel', 'AudioCapModel', (['"""audio_to_text/audiocaps_cntrstv_cnn14rnn_trm"""'], {}), "('audio_to_text/audiocaps_cntrstv_cnn14rnn_trm')\n", (27325, 27373), False, 'from audio_to_text.inference_waveform import AudioCapModel\n'), ((27427, 27457), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (27445, 27457), False, 'import whisper\n'), ((27863, 27884), 'audio_to_face.GeneFace_binding.GeneFaceInfer', 'GeneFaceInfer', (['device'], {}), '(device)\n', (27876, 27884), False, 'from audio_to_face.GeneFace_binding import GeneFaceInfer\n'), ((29085, 29267), 'audio_infer.pytorch.models.PVT', 'PVT', ([], {'sample_rate': 'self.sample_rate', 'window_size': 'self.window_size', 'hop_size': 'self.hop_size', 'mel_bins': 'self.mel_bins', 'fmin': 'self.fmin', 'fmax': 'self.fmax', 'classes_num': 'self.classes_num'}), '(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=\n    self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,\n    classes_num=self.classes_num)\n', (29088, 29267), False, 'from audio_infer.pytorch.models import PVT\n'), ((29306, 29364), 'torch.load', 'torch.load', (['self.checkpoint_path'], {'map_location': 'self.device'}), '(self.checkpoint_path, map_location=self.device)\n', (29316, 29364), False, 'import torch\n'), ((29531, 29592), 'librosa.core.load', 'librosa.core.load', (['audio_path'], {'sr': 'self.sample_rate', 'mono': '(True)'}), '(audio_path, sr=self.sample_rate, mono=True)\n', (29548, 29592), False, 'import librosa\n'), ((29672, 29698), 'torch.from_numpy', 'torch.from_numpy', (['waveform'], {}), '(waveform)\n', (29688, 29698), False, 'import torch\n'), ((30663, 30711), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 4)'}), '(2, 1, sharex=True, figsize=(10, 4))\n', (30675, 30711), 
True, 'import matplotlib.pyplot as plt\n'), ((31471, 31489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31487, 31489), True, 'import matplotlib.pyplot as plt\n'), ((31578, 31605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_filename'], {}), '(image_filename)\n', (31589, 31605), True, 'import matplotlib.pyplot as plt\n'), ((31952, 31958), 'sound_extraction.utils.stft.STFT', 'STFT', ([], {}), '()\n', (31956, 31958), False, 'from sound_extraction.utils.stft import STFT\n'), ((32046, 32073), 'torch.load', 'torch.load', (['self.model_file'], {}), '(self.model_file)\n', (32056, 32073), False, 'import torch\n'), ((32416, 32436), 'sound_extraction.utils.wav_io.load_wav', 'load_wav', (['audio_path'], {}), '(audio_path)\n', (32424, 32436), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33156, 33189), 'sound_extraction.utils.wav_io.save_wav', 'save_wav', (['est_wav', 'audio_filename'], {}), '(est_wav, audio_filename)\n', (33164, 33189), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33854, 33920), 'src.models.BinauralNetwork', 'BinauralNetwork', ([], {'view_dim': '(7)', 'warpnet_layers': '(4)', 'warpnet_channels': '(64)'}), '(view_dim=7, warpnet_layers=4, warpnet_channels=64)\n', (33869, 33920), False, 'from src.models import BinauralNetwork\n'), ((34119, 34171), 'librosa.load', 'librosa.load', ([], {'path': 'audio_path', 'sr': 'self.sr', 'mono': '(True)'}), '(path=audio_path, sr=self.sr, mono=True)\n', (34131, 34171), False, 'import librosa\n'), ((34187, 34209), 'torch.from_numpy', 'torch.from_numpy', (['mono'], {}), '(mono)\n', (34203, 34209), False, 'import torch\n'), ((34311, 34331), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (34325, 34331), False, 'import random\n'), ((34433, 34455), 'torch.from_numpy', 'torch.from_numpy', (['view'], {}), '(view)\n', (34449, 34455), False, 'import torch\n'), ((35860, 35918), 'torch.cat', 'torch.cat', (["[chunk['binaural'] for chunk in chunks]"], {'dim': '(-1)'}), "([chunk['binaural'] for chunk in chunks], dim=-1)\n", (35869, 35918), False, 'import torch\n'), ((36151, 36196), 'torchaudio.save', 'torchaudio.save', (['audio_filename', 'binaural', 'sr'], {}), '(audio_filename, binaural, sr)\n', (36166, 36196), False, 'import torchaudio\n'), ((36806, 36819), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (36816, 36819), True, 'import numpy as np\n'), ((36849, 36890), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'self.device'}), "('ViT-B/32', device=self.device)\n", (36858, 36890), False, 'import clip\n'), ((37034, 37147), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth"""'], {'map_location': '"""cpu"""'}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth',\n    map_location='cpu')\n", (37044, 37147), False, 'import torch\n'), ((37460, 37610), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'\n    , map_location=lambda storage, loc: storage)\n", (37470, 37610), False, 'import torch\n'), ((38016, 38103), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth"""'], {}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')\n", (38026, 38103), False, 
'import torch\n'), ((38122, 38208), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth"""'], {}), "(\n    'audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')\n", (38132, 38208), False, 'import torch\n'), ((38290, 38321), 'soundfile.read', 'sf.read', (['fname'], {'dtype': '"""float32"""'}), "(fname, dtype='float32')\n", (38297, 38321), True, 'import soundfile as sf\n'), ((38439, 38469), 'librosa.resample', 'librosa.resample', (['y', 'sr', '(22050)'], {}), '(y, sr, 22050)\n', (38455, 38469), False, 'import librosa\n'), ((39559, 39586), 'torch.from_numpy', 'torch.from_numpy', (['embedding'], {}), '(embedding)\n', (39575, 39586), False, 'import torch\n'), ((39796, 39820), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (39812, 39820), False, 'import torch\n'), ((40169, 40218), 'target_sound_detection.src.utils.median_filter', 'median_filter', (['pred'], {'window_size': '(1)', 'threshold': '(0.5)'}), '(pred, window_size=1, threshold=0.5)\n', (40182, 40218), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((45032, 45049), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (45047, 45049), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((45135, 45379), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n    'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n    False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n    True, device=self.device)\n", (45149, 45379), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((45613, 45640), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (45627, 45640), False, 'import soundfile\n'), ((47031, 47048), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (47046, 47048), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((47134, 47378), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n    'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n    False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n    True, device=self.device)\n", (47148, 47378), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((47597, 47624), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (47611, 47624), False, 'import soundfile\n'), ((48487, 48508), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (48493, 48508), False, 'from langchain.llms.openai import OpenAI\n'), ((49380, 49452), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), 
"(memory_key='chat_history', output_key='output')\n", (49404, 49452), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((75007, 75049), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75022, 75049), True, 'import gradio as gr\n'), ((75098, 75125), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (75113, 75125), True, 'import gradio as gr\n'), ((75168, 75210), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75183, 75210), True, 'import gradio as gr\n'), ((75253, 75295), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75268, 75295), True, 'import gradio as gr\n'), ((75339, 75370), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (75355, 75370), True, 'import gradio as gr\n'), ((75437, 75493), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (75446, 75493), True, 'import gradio as gr\n'), ((75583, 75645), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""AudioGPT"""', 'visible': '(False)'}), "(elem_id='chatbot', label='AudioGPT', visible=False)\n", (75593, 75645), True, 'import gradio as gr\n'), ((75663, 75675), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (75671, 75675), True, 'import gradio as gr\n'), ((130, 156), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'import os\n'), ((205, 231), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((293, 319), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'import os\n'), ((399, 425), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'import os\n'), ((493, 519), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (509, 519), False, 'import os\n'), ((9915, 9930), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9928, 9930), False, 'import torch\n'), ((13139, 13154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13152, 13154), False, 'import torch\n'), ((18985, 19005), 'numpy.load', 'np.load', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18992, 19005), True, 'import numpy as np\n'), ((20568, 20593), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20591, 20593), False, 'import torch\n'), ((20544, 20564), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20556, 20564), False, 'import torch\n'), ((20599, 20618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (20611, 20618), False, 'import torch\n'), ((21770, 21796), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (21785, 21796), False, 'import librosa\n'), ((22054, 22113), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22060, 22113), True, 'import numpy as np\n'), ((22534, 22560), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (22549, 22560), False, 'import librosa\n'), ((22818, 22877), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), 
'(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22824, 22877), True, 'import numpy as np\n'), ((25442, 25457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25455, 25457), False, 'import torch\n'), ((27988, 28016), 'os.path.basename', 'os.path.basename', (['audio_path'], {}), '(audio_path)\n', (28004, 28016), False, 'import os\n'), ((29774, 29789), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29787, 29789), False, 'import torch\n'), ((31019, 31067), 'numpy.arange', 'np.arange', (['(0)', 'frames_num', 'self.frames_per_second'], {}), '(0, frames_num, self.frames_per_second)\n', (31028, 31067), True, 'import numpy as np\n'), ((31105, 31154), 'numpy.arange', 'np.arange', (['(0)', '(frames_num / self.frames_per_second)'], {}), '(0, frames_num / self.frames_per_second)\n', (31114, 31154), True, 'import numpy as np\n'), ((31187, 31206), 'numpy.arange', 'np.arange', (['(0)', 'top_k'], {}), '(0, top_k)\n', (31196, 31206), True, 'import numpy as np\n'), ((40409, 40472), 'target_sound_detection.src.utils.decode_with_timestamps', 'decode_with_timestamps', (['target_event', 'filtered_pred[index_k, :]'], {}), '(target_event, filtered_pred[index_k, :])\n', (40431, 40472), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((47818, 47879), 'soundfile.write', 'soundfile.write', (['audio_filename', 'enh_speech[0]'], {'samplerate': 'sr'}), '(audio_filename, enh_speech[0], samplerate=sr)\n', (47833, 47879), False, 'import soundfile\n'), ((57651, 57951), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (57667, 57951), False, 'from langchain.agents.initialize import initialize_agent\n'), ((62445, 62745), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n    'conversational-react-description', verbose=True, memory=self.memory,\n    return_intermediate_steps=True, agent_kwargs={'prefix':\n    AUDIO_CHATGPT_PREFIX, 'format_instructions':\n    AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (62461, 62745), False, 'from langchain.agents.initialize import initialize_agent\n'), ((67323, 67348), 'soundfile.read', 'soundfile.read', (['file.name'], {}), '(file.name)\n', (67337, 67348), False, 'import soundfile\n'), ((67361, 67419), 'soundfile.write', 'soundfile.write', (['audio_filename', 'audio_load'], {'samplerate': 'sr'}), '(audio_filename, audio_load, samplerate=sr)\n', (67376, 67419), False, 'import soundfile\n'), ((68723, 68744), 'PIL.Image.open', 'Image.open', (['file.name'], {}), '(file.name)\n', (68733, 68744), False, 'from PIL import 
Image\n'), ((74811, 74841), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74826, 74841), True, 'import gradio as gr\n'), ((74843, 74898), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'new_audio_filename', 'visible': '(True)'}), '(value=new_audio_filename, visible=True)\n', (74858, 74898), True, 'import gradio as gr\n'), ((74900, 74930), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74915, 74930), True, 'import gradio as gr\n'), ((74932, 74963), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74948, 74963), True, 'import gradio as gr\n'), ((75516, 75524), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75522, 75524), True, 'import gradio as gr\n'), ((75538, 75564), 'gradio.Markdown', 'gr.Markdown', (['"""## AudioGPT"""'], {}), "('## AudioGPT')\n", (75549, 75564), True, 'import gradio as gr\n'), ((75690, 75698), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75696, 75698), True, 'import gradio as gr\n'), ((75985, 76006), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (75991, 76006), True, 'import gradio as gr\n'), ((76544, 76552), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76550, 76552), True, 'import gradio as gr\n'), ((76577, 76600), 'gradio.Audio', 'gr.Audio', ([], {'visible': '(False)'}), '(visible=False)\n', (76585, 76600), True, 'import gradio as gr\n'), ((76614, 76622), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76620, 76622), True, 'import gradio as gr\n'), ((76740, 76748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76746, 76748), True, 'import gradio as gr\n'), ((76773, 76828), 'gradio.Image', 'gr.Image', ([], {'type': '"""filepath"""', 'tool': '"""sketch"""', 'visible': '(False)'}), "(type='filepath', tool='sketch', visible=False)\n", (76781, 76828), True, 'import gradio as gr\n'), ((76840, 76848), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76846, 76848), True, 'import gradio as gr\n'), ((76875, 76923), 'gradio.Button', 'gr.Button', (['"""Predict Masked Place"""'], {'visible': '(False)'}), "('Predict Masked Place', visible=False)\n", (76884, 76923), True, 'import gradio as gr\n'), ((76945, 76966), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (76951, 76966), True, 'import gradio as gr\n'), ((5967, 6057), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n    'Salesforce/blip-image-captioning-base')\n", (6011, 6057), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6968, 7004), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (6978, 7004), False, 'import torch\n'), ((7534, 7562), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (7550, 7562), False, 'import torch\n'), ((9137, 9162), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9160, 9162), False, 'import torch\n'), ((9661, 9681), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (9669, 9681), True, 'import numpy as np\n'), ((10959, 10995), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (10969, 10995), False, 'import torch\n'), ((11555, 11583), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (11571, 11583), False, 'import torch\n'), ((13731, 13756), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13754, 13756), False, 'import torch\n'), ((14598, 14623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14621, 14623), False, 'import torch\n'), ((16575, 16600), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16598, 16600), False, 'import torch\n'), ((18374, 18399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18397, 18399), False, 'import torch\n'), ((20461, 20497), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (20471, 20497), False, 'import torch\n'), ((23657, 23685), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (23673, 23685), False, 'import torch\n'), ((26711, 26745), 'whisper.log_mel_spectrogram', 'whisper.log_mel_spectrogram', (['audio'], {}), '(audio)\n', (26738, 26745), False, 'import whisper\n'), ((27767, 27792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27790, 27792), False, 'import torch\n'), ((30235, 30267), 'numpy.max', 'np.max', (['framewise_output'], {'axis': '(0)'}), '(framewise_output, axis=0)\n', (30241, 30267), True, 'import numpy as np\n'), ((30742, 30754), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (30748, 30754), True, 'import numpy as np\n'), ((31244, 31265), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (31252, 31265), True, 'import numpy as np\n'), ((32456, 32478), 'torch.tensor', 'torch.tensor', (['waveform'], {}), '(waveform)\n', (32468, 32478), False, 'import torch\n'), ((34703, 34725), 'random.randint', 'random.randint', (['(0)', 'm_a'], {}), '(0, m_a)\n', (34717, 34725), False, 'import random\n'), ((35521, 35536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35534, 35536), False, 'import torch\n'), ((35938, 35974), 'torch.clamp', 'torch.clamp', (['binaural'], {'min': '(-1)', 'max': '(1)'}), '(binaural, min=-1, max=1)\n', (35949, 35974), False, 'import torch\n'), ((38646, 38665), 'clip.tokenize', 'clip.tokenize', (['text'], {}), '(text)\n', (38659, 38665), False, 'import clip\n'), ((49580, 49968), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n    description=\n    'useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n    )\n", (49584, 49968), False, 'from langchain.agents.tools import Tool\n'), ((50029, 50275), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n    'useful for when you want to know what is inside the photo. 
receives image_path as input. The input to this tool should be a string, representing the image_path. '\n    )\n", (50033, 50275), False, 'from langchain.agents.tools import Tool\n'), ((50340, 50626), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n    description=\n    'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n    )\n", (50344, 50626), False, 'from langchain.agents.tools import Tool\n'), ((50687, 51161), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n    'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n    )\n", (50691, 51161), False, 'from langchain.agents.tools import Tool\n'), ((51281, 52061), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n    \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n    func=self.t2s.inference, description=\n    \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n    )\n', (51285, 52061), False, 'from langchain.agents.tools import Tool\n'), ((52228, 52530), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n    inference, description=\n    'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n    )\n", (52232, 52530), False, 'from langchain.agents.tools import Tool\n'), ((53100, 53426), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Enhancement In Single-Channel"""', 'func': 'self.SE_SS_SC.inference', 'description': '"""useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Enhancement In Single-Channel', func=self.SE_SS_SC.\n    inference, description=\n    'useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (53104, 53426), False, 'from langchain.agents.tools import Tool\n'), ((53486, 53762), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Separation In Single-Channel"""', 'func': 'self.SS.inference', 'description': '"""useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Separation In Single-Channel', func=self.SS.inference,\n    description=\n    'useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (53490, 53762), False, 'from langchain.agents.tools import Tool\n'), ((54246, 54479), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From The Image"""', 'func': 'self.i2a.inference', 'description': '"""useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. """'}), "(name='Generate Audio From The Image', func=self.i2a.inference,\n    description=\n    'useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. 
'\n    )\n", (54250, 54479), False, 'from langchain.agents.tools import Tool\n'), ((54541, 54792), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n    description=\n    'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (54545, 54792), False, 'from langchain.agents.tools import Tool\n'), ((54854, 55191), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Audio Inpainting"""', 'func': 'self.inpaint.show_mel_fn', 'description': '"""useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path."""'}), "(name='Audio Inpainting', func=self.inpaint.show_mel_fn, description=\n    'useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path.'\n    )\n", (54858, 55191), False, 'from langchain.agents.tools import Tool\n'), ((55257, 55513), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Transcribe Speech"""', 'func': 'self.asr.inference', 'description': '"""useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Transcribe Speech', func=self.asr.inference, description=\n    'useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (55261, 55513), False, 'from langchain.agents.tools import Tool\n'), ((55578, 55869), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n    func=self.geneface.inference, description=\n    'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n    )\n", (55582, 55869), False, 'from langchain.agents.tools import Tool\n'), ((55930, 56296), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Detect The Sound Event From The Audio"""', 'func': 'self.detection.inference', 'description': '"""useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Detect The Sound Event From The Audio', func=self.detection.\n    inference, description=\n    'useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (55934, 56296), False, 'from langchain.agents.tools import Tool\n'), ((56356, 56654), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sythesize Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. """'}), "(name='Sythesize Binaural Audio From A Mono Audio Input', func=self.\n    binaural.inference, description=\n    'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (56360, 56654), False, 'from langchain.agents.tools import Tool\n'), ((56714, 57120), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n    'Extract Sound Event From Mixture Audio Based On Language Description',\n    func=self.extraction.inference, description=\n    'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n    )\n", (56718, 57120), False, 'from langchain.agents.tools import Tool\n'), ((57176, 57569), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n    'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n    )\n", (57180, 57569), False, 'from langchain.agents.tools import Tool\n'), ((58070, 58093), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58079, 58093), True, 'import gradio as gr\n'), ((58095, 58119), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58104, 58119), True, 'import gradio as gr\n'), ((58121, 58144), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58130, 58144), True, 'import gradio as gr\n'), ((58146, 58170), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58155, 58170), True, 'import gradio as gr\n'), ((58228, 58514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n    description=\n    'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n    )\n", (58232, 58514), False, 'from langchain.agents.tools import Tool\n'), ((58575, 59049), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n    'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n    )\n", (58579, 59049), False, 'from langchain.agents.tools import Tool\n'), ((59169, 59949), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n    \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n    func=self.t2s.inference, description=\n    \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n    )\n', (59173, 59949), False, 'from langchain.agents.tools import Tool\n'), ((60116, 60418), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n    inference, description=\n    'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n    )\n", (60120, 60418), False, 'from langchain.agents.tools import Tool\n'), ((60478, 60729), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n    description=\n    'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n    )\n", (60482, 60729), False, 'from langchain.agents.tools import Tool\n'), ((60791, 61082), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n    func=self.geneface.inference, description=\n    'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n    )\n", (60795, 61082), False, 'from langchain.agents.tools import Tool\n'), ((61143, 61440), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. 
The input to this tool should be a string, representing the audio_path. """'}), "(name='Generate Binaural Audio From A Mono Audio Input', func=self.\n    binaural.inference, description=\n    'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n    )\n", (61147, 61440), False, 'from langchain.agents.tools import Tool\n'), ((61500, 61906), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n    'Extract Sound Event From Mixture Audio Based On Language Description',\n    func=self.extraction.inference, description=\n    'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n    )\n", (61504, 61906), False, 'from langchain.agents.tools import Tool\n'), ((61962, 62355), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n    'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n    )\n", (61966, 62355), False, 'from langchain.agents.tools import Tool\n'), ((62864, 62888), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62873, 62888), True, 'import gradio as gr\n'), ((62890, 62914), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62899, 62914), True, 'import gradio as gr\n'), ((62916, 62940), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62925, 62940), True, 'import gradio as gr\n'), ((62942, 62965), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (62951, 62965), True, 'import gradio as gr\n'), ((63585, 63615), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63600, 63615), True, 'import gradio as gr\n'), ((63617, 63647), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63632, 63647), True, 'import gradio as gr\n'), ((63649, 63679), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63664, 63679), True, 'import gradio as gr\n'), ((63681, 63712), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63697, 63712), True, 'import gradio as gr\n'), ((66706, 66757), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (66721, 66757), True, 'import gradio as gr\n'), ((66758, 66788), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66773, 66788), True, 'import gradio as gr\n'), ((66790, 66820), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66805, 66820), True, 'import gradio as gr\n'), ((66822, 66853), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66838, 66853), True, 'import gradio as gr\n'), ((68295, 68346), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (68310, 68346), True, 'import gradio as gr\n'), ((68347, 68377), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (68362, 68377), True, 'import gradio as gr\n'), ((69917, 69947), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69932, 69947), True, 'import gradio as gr\n'), ((69949, 69979), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69964, 69979), True, 'import gradio as gr\n'), ((70735, 70762), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (70750, 70762), True, 'import gradio as gr\n'), ((70764, 70822), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (70779, 70822), True, 'import gradio as gr\n'), ((70830, 70860), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (70845, 70860), True, 'import gradio as gr\n'), ((73858, 73885), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73873, 73885), True, 'import gradio as gr\n'), ((73887, 73945), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73902, 73945), True, 'import gradio as gr\n'), 
((73953, 73983), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (73968, 73983), True, 'import gradio as gr\n'), ((75732, 75752), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (75741, 75752), True, 'import gradio as gr\n'), ((75789, 75865), 'gradio.Radio', 'gr.Radio', ([], {'choices': "['text', 'speech']", 'value': '"""text"""', 'label': '"""Interaction Type"""'}), "(choices=['text', 'speech'], value='text', label='Interaction Type')\n", (75797, 75865), True, 'import gradio as gr\n'), ((75883, 75916), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (75892, 75916), True, 'import gradio as gr\n'), ((75943, 75962), 'gradio.Button', 'gr.Button', (['"""Select"""'], {}), "('Select')\n", (75952, 75962), True, 'import gradio as gr\n'), ((76044, 76064), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (76053, 76064), True, 'import gradio as gr\n'), ((76219, 76252), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76228, 76252), True, 'import gradio as gr\n'), ((76276, 76301), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Run"""'], {}), "('🏃\\u200d♂️Run')\n", (76285, 76301), True, 'import gradio as gr\n'), ((76314, 76347), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76323, 76347), True, 'import gradio as gr\n'), ((76377, 76397), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (76386, 76397), True, 'import gradio as gr\n'), ((76415, 76448), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76424, 76448), True, 'import gradio as gr\n'), ((76472, 76530), 'gradio.UploadButton', 'gr.UploadButton', (['"""🖼️Upload"""'], {'file_types': "['image', 'audio']"}), "('🖼️Upload', file_types=['image', 'audio'])\n", (76487, 76530), True, 'import gradio as gr\n'), ((76641, 76674), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (76650, 76674), True, 'import gradio as gr\n'), ((76703, 76726), 'gradio.Video', 'gr.Video', ([], {'visible': '(False)'}), '(visible=False)\n', (76711, 76726), True, 'import gradio as gr\n'), ((77007, 77027), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (77016, 77027), True, 'import gradio as gr\n'), ((77060, 77121), 'gradio.Audio', 'gr.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""', 'label': '"""Input"""'}), "(source='microphone', type='filepath', label='Input')\n", (77068, 77121), True, 'import gradio as gr\n'), ((77139, 77173), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77148, 77173), True, 'import gradio as gr\n'), ((77204, 77232), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Submit"""'], {}), "('🏃\\u200d♂️Submit')\n", (77213, 77232), True, 'import gradio as gr\n'), ((77245, 77279), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77254, 77279), True, 'import gradio as gr\n'), ((77312, 77332), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (77321, 77332), True, 'import gradio as gr\n'), ((77350, 77358), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (77356, 77358), True, 'import gradio as gr\n'), ((77392, 77431), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output"""', 'visible': 
'(False)'}), "(label='Output', visible=False)\n", (77400, 77431), True, 'import gradio as gr\n'), ((4265, 4277), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4275, 4277), False, 'import uuid\n'), ((6139, 6161), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6149, 6161), False, 'from PIL import Image\n'), ((20850, 20871), 'torch.from_numpy', 'torch.from_numpy', (['mel'], {}), '(mel)\n', (20866, 20871), False, 'import torch\n'), ((20926, 20948), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (20942, 20948), False, 'import torch\n'), ((31997, 32012), 'sound_extraction.model.LASSNet.LASSNet', 'LASSNet', (['device'], {}), '(device)\n', (32004, 32012), False, 'from sound_extraction.model.LASSNet import LASSNet\n'), ((38499, 38549), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {}), '(y, **self.MEL_ARGS)\n', (38529, 38549), False, 'import librosa\n'), ((64244, 64274), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64259, 64274), True, 'import gradio as gr\n'), ((64276, 64306), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64291, 64306), True, 'import gradio as gr\n'), ((64308, 64338), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64323, 64338), True, 'import gradio as gr\n'), ((64340, 64371), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64356, 64371), True, 'import gradio as gr\n'), ((71452, 71479), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71467, 71479), True, 'import gradio as gr\n'), ((71481, 71539), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71496, 71539), True, 'import gradio as gr\n'), ((71547, 71577), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (71562, 71577), True, 'import gradio as gr\n'), ((5271, 5283), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5281, 5283), False, 'import uuid\n'), ((9377, 9399), 'torch.FloatTensor', 'torch.FloatTensor', (['wav'], {}), '(wav)\n', (9394, 9399), False, 'import torch\n'), ((10120, 10132), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10130, 10132), False, 'import uuid\n'), ((13340, 13352), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13350, 13352), False, 'import uuid\n'), ((14308, 14320), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14318, 14320), False, 'import uuid\n'), ((16178, 16190), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16188, 16190), False, 'import uuid\n'), ((18075, 18087), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18085, 18087), False, 'import uuid\n'), ((19487, 19499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19497, 19499), False, 'import uuid\n'), ((23290, 23302), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23300, 23302), False, 'import uuid\n'), ((26117, 26129), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26127, 26129), False, 'import uuid\n'), ((26232, 26244), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26242, 26244), False, 'import uuid\n'), ((31541, 31553), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (31551, 31553), False, 'import uuid\n'), ((33070, 33082), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (33080, 33082), False, 'import uuid\n'), ((34346, 34386), 'numpy.loadtxt', 'np.loadtxt', (['self.position_file[rand_int]'], {}), 
'(self.position_file[rand_int])\n', (34356, 34386), True, 'import numpy as np\n'), ((36088, 36100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36098, 36100), False, 'import uuid\n'), ((45879, 45891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45889, 45891), False, 'import uuid\n'), ((47744, 47756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47754, 47756), False, 'import uuid\n'), ((64588, 64618), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64603, 64618), True, 'import gradio as gr\n'), ((64620, 64650), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64635, 64650), True, 'import gradio as gr\n'), ((64652, 64682), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64667, 64682), True, 'import gradio as gr\n'), ((64684, 64715), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64700, 64715), True, 'import gradio as gr\n'), ((70081, 70093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (70091, 70093), False, 'import uuid\n'), ((71927, 71954), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71942, 71954), True, 'import gradio as gr\n'), ((71956, 72014), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71971, 72014), True, 'import gradio as gr\n'), ((72022, 72052), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72037, 72052), True, 'import gradio as gr\n'), ((76088, 76183), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n    'Enter text and press enter, or upload an image')\n", (76098, 76183), True, 'import gradio as gr\n'), ((47987, 47999), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47997, 47999), False, 'import uuid\n'), ((48159, 48171), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48169, 48171), False, 'import uuid\n'), ((65068, 65098), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65083, 65098), True, 'import gradio as gr\n'), ((65100, 65130), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65115, 65130), True, 'import gradio as gr\n'), ((65132, 65162), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65147, 65162), True, 'import gradio as gr\n'), ((65164, 65195), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65180, 65195), True, 'import gradio as gr\n'), ((67208, 67220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (67218, 67220), False, 'import uuid\n'), ((68627, 68639), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (68637, 68639), False, 'import uuid\n'), ((72538, 72565), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (72553, 72565), True, 'import gradio as gr\n'), ((72567, 72625), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (72582, 72625), True, 'import gradio as gr\n'), ((72633, 72663), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72648, 72663), True, 'import gradio as gr\n'), ((65632, 65683), 
'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (65647, 65683), True, 'import gradio as gr\n'), ((65684, 65714), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65699, 65714), True, 'import gradio as gr\n'), ((65716, 65767), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'image_filename', 'visible': '(True)'}), '(value=image_filename, visible=True)\n', (65731, 65767), True, 'import gradio as gr\n'), ((65768, 65798), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(True)'}), '(visible=True)\n', (65784, 65798), True, 'import gradio as gr\n'), ((73124, 73151), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73139, 73151), True, 'import gradio as gr\n'), ((73153, 73211), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73168, 73211), True, 'import gradio as gr\n'), ((73219, 73270), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (73234, 73270), True, 'import gradio as gr\n'), ((66196, 66226), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66211, 66226), True, 'import gradio as gr\n'), ((66228, 66279), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (66243, 66279), True, 'import gradio as gr\n'), ((66280, 66310), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66295, 66310), True, 'import gradio as gr\n'), ((66312, 66343), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66328, 66343), True, 'import gradio as gr\n')] | 
| 
	from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
__version__ = "0.0.20"
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
    template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
    integration_namespace.integration_cli,
    name="integration",
    help=integration_namespace.__doc__,
)
def version_callback(show_version: bool) -> None:
    if show_version:
        typer.echo(f"langchain-cli {__version__}")
        raise typer.Exit()
@app.callback()
def main(
    version: bool = typer.Option(
        False,
        "--version",
        "-v",
        help="Print the current CLI version.",
        callback=version_callback,
        is_eager=True,
    ),
):
    pass
@app.command()
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
    ] = None,
) -> None:
    """
    Start the LangServe app, whether it's a template or an app.
    """
    # see if is a template
    try:
        project_dir = get_package_root()
        pyproject = project_dir / "pyproject.toml"
        get_langserve_export(pyproject)
    except KeyError:
        # not a template
        app_namespace.serve(port=port, host=host)
    else:
        # is a template
        template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
    app()
 | 
	[
  "langchain_cli.namespaces.template.serve",
  "langchain_cli.utils.packages.get_package_root",
  "langchain_cli.namespaces.app.serve",
  "langchain_cli.utils.packages.get_langserve_export"
] | 
	[((394, 449), 'typer.Typer', 'typer.Typer', ([], {'no_args_is_help': '(True)', 'add_completion': '(False)'}), '(no_args_is_help=True, add_completion=False)\n', (405, 449), False, 'import typer\n'), ((952, 1076), 'typer.Option', 'typer.Option', (['(False)', '"""--version"""', '"""-v"""'], {'help': '"""Print the current CLI version."""', 'callback': 'version_callback', 'is_eager': '(True)'}), "(False, '--version', '-v', help=\n    'Print the current CLI version.', callback=version_callback, is_eager=True)\n", (964, 1076), False, 'import typer\n'), ((834, 876), 'typer.echo', 'typer.echo', (['f"""langchain-cli {__version__}"""'], {}), "(f'langchain-cli {__version__}')\n", (844, 876), False, 'import typer\n'), ((891, 903), 'typer.Exit', 'typer.Exit', ([], {}), '()\n', (901, 903), False, 'import typer\n'), ((1543, 1561), 'langchain_cli.utils.packages.get_package_root', 'get_package_root', ([], {}), '()\n', (1559, 1561), False, 'from langchain_cli.utils.packages import get_langserve_export, get_package_root\n'), ((1621, 1652), 'langchain_cli.utils.packages.get_langserve_export', 'get_langserve_export', (['pyproject'], {}), '(pyproject)\n', (1641, 1652), False, 'from langchain_cli.utils.packages import get_langserve_export, get_package_root\n'), ((1791, 1837), 'langchain_cli.namespaces.template.serve', 'template_namespace.serve', ([], {'port': 'port', 'host': 'host'}), '(port=port, host=host)\n', (1815, 1837), True, 'from langchain_cli.namespaces import template as template_namespace\n'), ((1707, 1748), 'langchain_cli.namespaces.app.serve', 'app_namespace.serve', ([], {'port': 'port', 'host': 'host'}), '(port=port, host=host)\n', (1726, 1748), True, 'from langchain_cli.namespaces import app as app_namespace\n'), ((1219, 1269), 'typer.Option', 'typer.Option', ([], {'help': '"""The port to run the server on"""'}), "(help='The port to run the server on')\n", (1231, 1269), False, 'import typer\n'), ((1328, 1378), 'typer.Option', 'typer.Option', ([], {'help': '"""The host to run the server on"""'}), "(help='The host to run the server on')\n", (1340, 1378), False, 'import typer\n')] | 
| 
	import os
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter
from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL
def ingest_documents():
    """
    Ingest PDF to Redis from the data/ directory that
    contains Edgar 10k filings data for Nike.
    """
    # Load list of pdfs
    company_name = "Nike"
    data_path = "data/"
    doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0]
    print("Parsing 10k filing doc for NIKE", doc)  # noqa: T201
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500, chunk_overlap=100, add_start_index=True
    )
    loader = UnstructuredFileLoader(doc, mode="single", strategy="fast")
    chunks = loader.load_and_split(text_splitter)
    print("Done preprocessing. Created", len(chunks), "chunks of the original pdf")  # noqa: T201
    # Create vectorstore
    embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
    _ = Redis.from_texts(
        # appending this little bit can sometimes help with semantic retrieval
        # especially with multiple companies
        texts=[f"Company: {company_name}. " + chunk.page_content for chunk in chunks],
        metadatas=[chunk.metadata for chunk in chunks],
        embedding=embedder,
        index_name=INDEX_NAME,
        index_schema=INDEX_SCHEMA,
        redis_url=REDIS_URL,
    )
if __name__ == "__main__":
    ingest_documents()
 | 
	[
  "langchain_community.vectorstores.Redis.from_texts",
  "langchain_community.document_loaders.UnstructuredFileLoader",
  "langchain_community.embeddings.HuggingFaceEmbeddings",
  "langchain_text_splitters.RecursiveCharacterTextSplitter"
] | 
	[((726, 818), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(100)', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=100,\n    add_start_index=True)\n', (756, 818), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((842, 901), 'langchain_community.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['doc'], {'mode': '"""single"""', 'strategy': '"""fast"""'}), "(doc, mode='single', strategy='fast')\n", (864, 901), False, 'from langchain_community.document_loaders import UnstructuredFileLoader\n'), ((1091, 1136), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'EMBED_MODEL'}), '(model_name=EMBED_MODEL)\n', (1112, 1136), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((1146, 1394), 'langchain_community.vectorstores.Redis.from_texts', 'Redis.from_texts', ([], {'texts': "[(f'Company: {company_name}. ' + chunk.page_content) for chunk in chunks]", 'metadatas': '[chunk.metadata for chunk in chunks]', 'embedding': 'embedder', 'index_name': 'INDEX_NAME', 'index_schema': 'INDEX_SCHEMA', 'redis_url': 'REDIS_URL'}), "(texts=[(f'Company: {company_name}. ' + chunk.page_content) for\n    chunk in chunks], metadatas=[chunk.metadata for chunk in chunks],\n    embedding=embedder, index_name=INDEX_NAME, index_schema=INDEX_SCHEMA,\n    redis_url=REDIS_URL)\n", (1162, 1394), False, 'from langchain_community.vectorstores import Redis\n'), ((572, 601), 'os.path.join', 'os.path.join', (['data_path', 'file'], {}), '(data_path, file)\n', (584, 601), False, 'import os\n'), ((614, 635), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (624, 635), False, 'import os\n')] | 
| 
	from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
index = index_creator.from_documents(docs)
index.vectorstore.save_local("titanic_data")
 | 
	[
  "langchain_community.document_loaders.CSVLoader",
  "langchain.indexes.VectorstoreIndexCreator"
] | 
	[((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {'vectorstore_cls': 'FAISS'}), '(vectorstore_cls=FAISS)\n', (291, 314), False, 'from langchain.indexes import VectorstoreIndexCreator\n')] | 
| 
	from langchain_core.prompts.prompt import PromptTemplate
# There are a few different templates to choose from
# These are just different ways to generate hypothetical documents
web_search_template = """Please write a passage to answer the question 
Question: {question}
Passage:"""
sci_fact_template = """Please write a scientific paper passage to support/refute the claim 
Claim: {question}
Passage:"""  # noqa: E501
fiqa_template = """Please write a financial article passage to answer the question
Question: {question}
Passage:"""
trec_news_template = """Please write a news passage about the topic.
Topic: {question}
Passage:"""
# For the sake of this example we will use the web search template
hyde_prompt = PromptTemplate.from_template(web_search_template)
 | 
	[
  "langchain_core.prompts.prompt.PromptTemplate.from_template"
] | 
	[((716, 765), 'langchain_core.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['web_search_template'], {}), '(web_search_template)\n', (744, 765), False, 'from langchain_core.prompts.prompt import PromptTemplate\n')] | 
| 
	from pathlib import Path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter
txt_path = Path(__file__).parent / "dune.txt"
graph = Neo4jGraph()
# Load the text file
loader = TextLoader(str(txt_path))
documents = loader.load()
# Define chunking strategy
parent_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=24)
child_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=24)
# Store parent-child patterns into graph
parent_documents = parent_splitter.split_documents(documents)
for parent in parent_documents:
    child_documents = child_splitter.split_documents([parent])
    params = {
        "parent": parent.page_content,
        "children": [c.page_content for c in child_documents],
    }
    graph.query(
        """
    CREATE (p:Parent {text: $parent})
    WITH p 
    UNWIND $children AS child
    CREATE (c:Child {text: child})
    CREATE (c)-[:HAS_PARENT]->(p)
    """,
        params,
    )
# Calculate embedding values on the child nodes
Neo4jVector.from_existing_graph(
    OpenAIEmbeddings(),
    index_name="retrieval",
    node_label="Child",
    text_node_properties=["text"],
    embedding_node_property="embedding",
)
 | 
	[
  "langchain_community.embeddings.openai.OpenAIEmbeddings",
  "langchain_community.graphs.Neo4jGraph",
  "langchain_text_splitters.TokenTextSplitter"
] | 
	[((371, 383), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (381, 383), False, 'from langchain_community.graphs import Neo4jGraph\n'), ((513, 564), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(24)'}), '(chunk_size=512, chunk_overlap=24)\n', (530, 564), False, 'from langchain_text_splitters import TokenTextSplitter\n'), ((582, 633), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(100)', 'chunk_overlap': '(24)'}), '(chunk_size=100, chunk_overlap=24)\n', (599, 633), False, 'from langchain_text_splitters import TokenTextSplitter\n'), ((1251, 1269), 'langchain_community.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1267, 1269), False, 'from langchain_community.embeddings.openai import OpenAIEmbeddings\n'), ((327, 341), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (331, 341), False, 'from pathlib import Path\n')] | 
| 
	from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
    """
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
 | 
	[
  "langchain_community.graphs.Neo4jGraph"
] | 
	[((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] | 
| 