code
stringlengths 141
79.4k
| apis
sequencelengths 1
23
| extract_api
stringlengths 126
73.2k
|
---|---|---|
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
"CREATE FULLTEXT INDEX entity IF NOT EXISTS"
" FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
"CREATE FULLTEXT INDEX entity IF NOT EXISTS"
" FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
"CREATE FULLTEXT INDEX entity IF NOT EXISTS"
" FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index for entity matching
# on Person and Movie nodes
graph.query(
"CREATE FULLTEXT INDEX entity IF NOT EXISTS"
" FOR (m:Movie|Person) ON EACH [m.title, m.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
| [
"langchain_core._api.surface_langchain_beta_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
] | [((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_beta_warnings', 'surface_langchain_beta_warnings', ([], {}), '()\n', (387, 389), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((173, 202), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (189, 202), False, 'from importlib import metadata\n')] |
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
| [
"langchain_core._api.surface_langchain_beta_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
] | [((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_beta_warnings', 'surface_langchain_beta_warnings', ([], {}), '()\n', (387, 389), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((173, 202), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (189, 202), False, 'from importlib import metadata\n')] |
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
| [
"langchain_core._api.surface_langchain_beta_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
] | [((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_beta_warnings', 'surface_langchain_beta_warnings', ([], {}), '()\n', (387, 389), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((173, 202), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (189, 202), False, 'from importlib import metadata\n')] |
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
| [
"langchain_core._api.surface_langchain_beta_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
] | [((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_beta_warnings', 'surface_langchain_beta_warnings', ([], {}), '()\n', (387, 389), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((173, 202), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (189, 202), False, 'from importlib import metadata\n')] |
import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
_JS_SERIALIZABLE_MAPPING,
_OG_SERIALIZABLE_MAPPING,
OLD_CORE_NAMESPACES_MAPPING,
SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializable import Serializable
DEFAULT_NAMESPACES = ["langchain", "langchain_core", "langchain_community"]
ALL_SERIALIZABLE_MAPPINGS = {
**SERIALIZABLE_MAPPING,
**OLD_CORE_NAMESPACES_MAPPING,
**_OG_SERIALIZABLE_MAPPING,
**_JS_SERIALIZABLE_MAPPING,
}
class Reviver:
"""Reviver for JSON objects."""
def __init__(
self,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> None:
self.secrets_map = secrets_map or dict()
# By default only support langchain, but user can pass in additional namespaces
self.valid_namespaces = (
[*DEFAULT_NAMESPACES, *valid_namespaces]
if valid_namespaces
else DEFAULT_NAMESPACES
)
def __call__(self, value: Dict[str, Any]) -> Any:
if (
value.get("lc", None) == 1
and value.get("type", None) == "secret"
and value.get("id", None) is not None
):
[key] = value["id"]
if key in self.secrets_map:
return self.secrets_map[key]
else:
if key in os.environ and os.environ[key]:
return os.environ[key]
raise KeyError(f'Missing key "{key}" in load(secrets_map)')
if (
value.get("lc", None) == 1
and value.get("type", None) == "not_implemented"
and value.get("id", None) is not None
):
raise NotImplementedError(
"Trying to load an object that doesn't implement "
f"serialization: {value}"
)
if (
value.get("lc", None) == 1
and value.get("type", None) == "constructor"
and value.get("id", None) is not None
):
[*namespace, name] = value["id"]
if namespace[0] not in self.valid_namespaces:
raise ValueError(f"Invalid namespace: {value}")
# The root namespace "langchain" is not a valid identifier.
if len(namespace) == 1 and namespace[0] == "langchain":
raise ValueError(f"Invalid namespace: {value}")
# If namespace is in known namespaces, try to use mapping
if namespace[0] in DEFAULT_NAMESPACES:
# Get the importable path
key = tuple(namespace + [name])
if key not in ALL_SERIALIZABLE_MAPPINGS:
raise ValueError(
"Trying to deserialize something that cannot "
"be deserialized in current version of langchain-core: "
f"{key}"
)
import_path = ALL_SERIALIZABLE_MAPPINGS[key]
# Split into module and name
import_dir, import_obj = import_path[:-1], import_path[-1]
# Import module
mod = importlib.import_module(".".join(import_dir))
# Import class
cls = getattr(mod, import_obj)
# Otherwise, load by path
else:
mod = importlib.import_module(".".join(namespace))
cls = getattr(mod, name)
# The class must be a subclass of Serializable.
if not issubclass(cls, Serializable):
raise ValueError(f"Invalid namespace: {value}")
# We don't need to recurse on kwargs
# as json.loads will do that for us.
kwargs = value.get("kwargs", dict())
return cls(**kwargs)
return value
@beta()
def loads(
text: str,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
@beta()
def load(
obj: Any,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON object. Use this if you already
have a parsed JSON object, eg. from `json.load` or `orjson.loads`.
Args:
obj: The object to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
reviver = Reviver(secrets_map, valid_namespaces)
def _load(obj: Any) -> Any:
if isinstance(obj, dict):
# Need to revive leaf nodes before reviving this node
loaded_obj = {k: _load(v) for k, v in obj.items()}
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
return obj
return _load(obj)
| [
"langchain_core._api.beta"
] | [((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] |
import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
_JS_SERIALIZABLE_MAPPING,
_OG_SERIALIZABLE_MAPPING,
OLD_CORE_NAMESPACES_MAPPING,
SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializable import Serializable
DEFAULT_NAMESPACES = ["langchain", "langchain_core", "langchain_community"]
ALL_SERIALIZABLE_MAPPINGS = {
**SERIALIZABLE_MAPPING,
**OLD_CORE_NAMESPACES_MAPPING,
**_OG_SERIALIZABLE_MAPPING,
**_JS_SERIALIZABLE_MAPPING,
}
class Reviver:
"""Reviver for JSON objects."""
def __init__(
self,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> None:
self.secrets_map = secrets_map or dict()
# By default only support langchain, but user can pass in additional namespaces
self.valid_namespaces = (
[*DEFAULT_NAMESPACES, *valid_namespaces]
if valid_namespaces
else DEFAULT_NAMESPACES
)
def __call__(self, value: Dict[str, Any]) -> Any:
if (
value.get("lc", None) == 1
and value.get("type", None) == "secret"
and value.get("id", None) is not None
):
[key] = value["id"]
if key in self.secrets_map:
return self.secrets_map[key]
else:
if key in os.environ and os.environ[key]:
return os.environ[key]
raise KeyError(f'Missing key "{key}" in load(secrets_map)')
if (
value.get("lc", None) == 1
and value.get("type", None) == "not_implemented"
and value.get("id", None) is not None
):
raise NotImplementedError(
"Trying to load an object that doesn't implement "
f"serialization: {value}"
)
if (
value.get("lc", None) == 1
and value.get("type", None) == "constructor"
and value.get("id", None) is not None
):
[*namespace, name] = value["id"]
if namespace[0] not in self.valid_namespaces:
raise ValueError(f"Invalid namespace: {value}")
# The root namespace "langchain" is not a valid identifier.
if len(namespace) == 1 and namespace[0] == "langchain":
raise ValueError(f"Invalid namespace: {value}")
# If namespace is in known namespaces, try to use mapping
if namespace[0] in DEFAULT_NAMESPACES:
# Get the importable path
key = tuple(namespace + [name])
if key not in ALL_SERIALIZABLE_MAPPINGS:
raise ValueError(
"Trying to deserialize something that cannot "
"be deserialized in current version of langchain-core: "
f"{key}"
)
import_path = ALL_SERIALIZABLE_MAPPINGS[key]
# Split into module and name
import_dir, import_obj = import_path[:-1], import_path[-1]
# Import module
mod = importlib.import_module(".".join(import_dir))
# Import class
cls = getattr(mod, import_obj)
# Otherwise, load by path
else:
mod = importlib.import_module(".".join(namespace))
cls = getattr(mod, name)
# The class must be a subclass of Serializable.
if not issubclass(cls, Serializable):
raise ValueError(f"Invalid namespace: {value}")
# We don't need to recurse on kwargs
# as json.loads will do that for us.
kwargs = value.get("kwargs", dict())
return cls(**kwargs)
return value
@beta()
def loads(
text: str,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
@beta()
def load(
obj: Any,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON object. Use this if you already
have a parsed JSON object, eg. from `json.load` or `orjson.loads`.
Args:
obj: The object to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
reviver = Reviver(secrets_map, valid_namespaces)
def _load(obj: Any) -> Any:
if isinstance(obj, dict):
# Need to revive leaf nodes before reviving this node
loaded_obj = {k: _load(v) for k, v in obj.items()}
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
return obj
return _load(obj)
| [
"langchain_core._api.beta"
] | [((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] |
import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
_JS_SERIALIZABLE_MAPPING,
_OG_SERIALIZABLE_MAPPING,
OLD_CORE_NAMESPACES_MAPPING,
SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializable import Serializable
DEFAULT_NAMESPACES = ["langchain", "langchain_core", "langchain_community"]
ALL_SERIALIZABLE_MAPPINGS = {
**SERIALIZABLE_MAPPING,
**OLD_CORE_NAMESPACES_MAPPING,
**_OG_SERIALIZABLE_MAPPING,
**_JS_SERIALIZABLE_MAPPING,
}
class Reviver:
"""Reviver for JSON objects."""
def __init__(
self,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> None:
self.secrets_map = secrets_map or dict()
# By default only support langchain, but user can pass in additional namespaces
self.valid_namespaces = (
[*DEFAULT_NAMESPACES, *valid_namespaces]
if valid_namespaces
else DEFAULT_NAMESPACES
)
def __call__(self, value: Dict[str, Any]) -> Any:
if (
value.get("lc", None) == 1
and value.get("type", None) == "secret"
and value.get("id", None) is not None
):
[key] = value["id"]
if key in self.secrets_map:
return self.secrets_map[key]
else:
if key in os.environ and os.environ[key]:
return os.environ[key]
raise KeyError(f'Missing key "{key}" in load(secrets_map)')
if (
value.get("lc", None) == 1
and value.get("type", None) == "not_implemented"
and value.get("id", None) is not None
):
raise NotImplementedError(
"Trying to load an object that doesn't implement "
f"serialization: {value}"
)
if (
value.get("lc", None) == 1
and value.get("type", None) == "constructor"
and value.get("id", None) is not None
):
[*namespace, name] = value["id"]
if namespace[0] not in self.valid_namespaces:
raise ValueError(f"Invalid namespace: {value}")
# The root namespace "langchain" is not a valid identifier.
if len(namespace) == 1 and namespace[0] == "langchain":
raise ValueError(f"Invalid namespace: {value}")
# If namespace is in known namespaces, try to use mapping
if namespace[0] in DEFAULT_NAMESPACES:
# Get the importable path
key = tuple(namespace + [name])
if key not in ALL_SERIALIZABLE_MAPPINGS:
raise ValueError(
"Trying to deserialize something that cannot "
"be deserialized in current version of langchain-core: "
f"{key}"
)
import_path = ALL_SERIALIZABLE_MAPPINGS[key]
# Split into module and name
import_dir, import_obj = import_path[:-1], import_path[-1]
# Import module
mod = importlib.import_module(".".join(import_dir))
# Import class
cls = getattr(mod, import_obj)
# Otherwise, load by path
else:
mod = importlib.import_module(".".join(namespace))
cls = getattr(mod, name)
# The class must be a subclass of Serializable.
if not issubclass(cls, Serializable):
raise ValueError(f"Invalid namespace: {value}")
# We don't need to recurse on kwargs
# as json.loads will do that for us.
kwargs = value.get("kwargs", dict())
return cls(**kwargs)
return value
@beta()
def loads(
text: str,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
@beta()
def load(
obj: Any,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON object. Use this if you already
have a parsed JSON object, eg. from `json.load` or `orjson.loads`.
Args:
obj: The object to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
reviver = Reviver(secrets_map, valid_namespaces)
def _load(obj: Any) -> Any:
if isinstance(obj, dict):
# Need to revive leaf nodes before reviving this node
loaded_obj = {k: _load(v) for k, v in obj.items()}
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
return obj
return _load(obj)
| [
"langchain_core._api.beta"
] | [((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] |
import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
_JS_SERIALIZABLE_MAPPING,
_OG_SERIALIZABLE_MAPPING,
OLD_CORE_NAMESPACES_MAPPING,
SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializable import Serializable
DEFAULT_NAMESPACES = ["langchain", "langchain_core", "langchain_community"]
ALL_SERIALIZABLE_MAPPINGS = {
**SERIALIZABLE_MAPPING,
**OLD_CORE_NAMESPACES_MAPPING,
**_OG_SERIALIZABLE_MAPPING,
**_JS_SERIALIZABLE_MAPPING,
}
class Reviver:
"""Reviver for JSON objects."""
def __init__(
self,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> None:
self.secrets_map = secrets_map or dict()
# By default only support langchain, but user can pass in additional namespaces
self.valid_namespaces = (
[*DEFAULT_NAMESPACES, *valid_namespaces]
if valid_namespaces
else DEFAULT_NAMESPACES
)
def __call__(self, value: Dict[str, Any]) -> Any:
if (
value.get("lc", None) == 1
and value.get("type", None) == "secret"
and value.get("id", None) is not None
):
[key] = value["id"]
if key in self.secrets_map:
return self.secrets_map[key]
else:
if key in os.environ and os.environ[key]:
return os.environ[key]
raise KeyError(f'Missing key "{key}" in load(secrets_map)')
if (
value.get("lc", None) == 1
and value.get("type", None) == "not_implemented"
and value.get("id", None) is not None
):
raise NotImplementedError(
"Trying to load an object that doesn't implement "
f"serialization: {value}"
)
if (
value.get("lc", None) == 1
and value.get("type", None) == "constructor"
and value.get("id", None) is not None
):
[*namespace, name] = value["id"]
if namespace[0] not in self.valid_namespaces:
raise ValueError(f"Invalid namespace: {value}")
# The root namespace "langchain" is not a valid identifier.
if len(namespace) == 1 and namespace[0] == "langchain":
raise ValueError(f"Invalid namespace: {value}")
# If namespace is in known namespaces, try to use mapping
if namespace[0] in DEFAULT_NAMESPACES:
# Get the importable path
key = tuple(namespace + [name])
if key not in ALL_SERIALIZABLE_MAPPINGS:
raise ValueError(
"Trying to deserialize something that cannot "
"be deserialized in current version of langchain-core: "
f"{key}"
)
import_path = ALL_SERIALIZABLE_MAPPINGS[key]
# Split into module and name
import_dir, import_obj = import_path[:-1], import_path[-1]
# Import module
mod = importlib.import_module(".".join(import_dir))
# Import class
cls = getattr(mod, import_obj)
# Otherwise, load by path
else:
mod = importlib.import_module(".".join(namespace))
cls = getattr(mod, name)
# The class must be a subclass of Serializable.
if not issubclass(cls, Serializable):
raise ValueError(f"Invalid namespace: {value}")
# We don't need to recurse on kwargs
# as json.loads will do that for us.
kwargs = value.get("kwargs", dict())
return cls(**kwargs)
return value
@beta()
def loads(
text: str,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
@beta()
def load(
obj: Any,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
) -> Any:
"""Revive a LangChain class from a JSON object. Use this if you already
have a parsed JSON object, eg. from `json.load` or `orjson.loads`.
Args:
obj: The object to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
reviver = Reviver(secrets_map, valid_namespaces)
def _load(obj: Any) -> Any:
if isinstance(obj, dict):
# Need to revive leaf nodes before reviving this node
loaded_obj = {k: _load(v) for k, v in obj.items()}
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
return obj
return _load(obj)
| [
"langchain_core._api.beta"
] | [((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
| [
"langchain_community.graphs.networkx_graph.get_entities",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_community.graphs.networkx_graph.parse_triples"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
| [
"langchain_community.graphs.networkx_graph.get_entities",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_community.graphs.networkx_graph.parse_triples"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
| [
"langchain_community.graphs.networkx_graph.get_entities",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_community.graphs.networkx_graph.parse_triples"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
| [
"langchain_community.graphs.networkx_graph.get_entities",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_community.graphs.networkx_graph.parse_triples"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
# Ingest Documents into a Zep Collection
import os
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000")
ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None)
ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION", "langchaintest")
collection_config = CollectionConfig(
name=ZEP_COLLECTION_NAME,
description="Zep collection for LangChain",
metadata={},
embedding_dimensions=1536,
is_auto_embedded=True,
)
# Load
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
# Add to vectorDB
vectorstore = ZepVectorStore.from_documents(
documents=all_splits,
collection_name=ZEP_COLLECTION_NAME,
config=collection_config,
api_url=ZEP_API_URL,
api_key=ZEP_API_KEY,
embedding=FakeEmbeddings(size=1),
)
| [
"langchain_community.document_loaders.WebBaseLoader",
"langchain_community.embeddings.FakeEmbeddings",
"langchain_community.vectorstores.zep.CollectionConfig",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
] | [((338, 392), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""', '"""http://localhost:8000"""'], {}), "('ZEP_API_URL', 'http://localhost:8000')\n", (352, 392), False, 'import os\n'), ((407, 442), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""', 'None'], {}), "('ZEP_API_KEY', None)\n", (421, 442), False, 'import os\n'), ((465, 514), 'os.environ.get', 'os.environ.get', (['"""ZEP_COLLECTION"""', '"""langchaintest"""'], {}), "('ZEP_COLLECTION', 'langchaintest')\n", (479, 514), False, 'import os\n'), ((536, 694), 'langchain_community.vectorstores.zep.CollectionConfig', 'CollectionConfig', ([], {'name': 'ZEP_COLLECTION_NAME', 'description': '"""Zep collection for LangChain"""', 'metadata': '{}', 'embedding_dimensions': '(1536)', 'is_auto_embedded': '(True)'}), "(name=ZEP_COLLECTION_NAME, description=\n 'Zep collection for LangChain', metadata={}, embedding_dimensions=1536,\n is_auto_embedded=True)\n", (552, 694), False, 'from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore\n'), ((726, 795), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://lilianweng.github.io/posts/2023-06-23-agent/"""'], {}), "('https://lilianweng.github.io/posts/2023-06-23-agent/')\n", (739, 795), False, 'from langchain_community.document_loaders import WebBaseLoader\n'), ((842, 905), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (872, 905), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((1180, 1202), 'langchain_community.embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {'size': '(1)'}), '(size=1)\n', (1194, 1202), False, 'from langchain_community.embeddings import FakeEmbeddings\n')] |
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
template = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, you can respond as normal to the user.
Example 1:
Human: Hi!
Assistant: Hi! How are you?
Human: What is the weather in SF?
Assistant: <tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
It is 64 degress in SF
Begin!""" # noqa: E501
conversational_prompt = ChatPromptTemplate.from_messages(
[
("system", template),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{question}"),
("ai", "{agent_scratchpad}"),
]
)
def parse_output(message):
text = message.content
if "</tool>" in text:
tool, tool_input = text.split("</tool>")
_tool = tool.split("<tool>")[1]
_tool_input = tool_input.split("<tool_input>")[1]
if "</tool_input>" in _tool_input:
_tool_input = _tool_input.split("</tool_input>")[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
else:
return AgentFinish(return_values={"output": text}, log=text)
| [
"langchain_core.prompts.MessagesPlaceholder",
"langchain_core.agents.AgentAction",
"langchain_core.agents.AgentFinish"
] | [((1068, 1117), 'langchain_core.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (1087, 1117), False, 'from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n'), ((1548, 1605), 'langchain_core.agents.AgentAction', 'AgentAction', ([], {'tool': '_tool', 'tool_input': '_tool_input', 'log': 'text'}), '(tool=_tool, tool_input=_tool_input, log=text)\n', (1559, 1605), False, 'from langchain_core.agents import AgentAction, AgentFinish\n'), ((1631, 1684), 'langchain_core.agents.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': text}", 'log': 'text'}), "(return_values={'output': text}, log=text)\n", (1642, 1684), False, 'from langchain_core.agents import AgentAction, AgentFinish\n')] |
from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
WITH row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
m.title = row.title,
m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
WITH row
MATCH (m:Movie {id:row.movieId})
MERGE (u:User {id:row.userId})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat(row.rating),
r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
"CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
WITH row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
m.title = row.title,
m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
WITH row
MATCH (m:Movie {id:row.movieId})
MERGE (u:User {id:row.userId})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat(row.rating),
r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
"CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
WITH row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
m.title = row.title,
m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
WITH row
MATCH (m:Movie {id:row.movieId})
MERGE (u:User {id:row.userId})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat(row.rating),
r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
"CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (g:Genre) REQUIRE g.name IS UNIQUE;")
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies.csv'
AS row
CALL {
WITH row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
m.title = row.title,
m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
} IN TRANSACTIONS
"""
graph.query(movies_query)
# Import rating information
rating_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/ratings.csv'
AS row
CALL {
WITH row
MATCH (m:Movie {id:row.movieId})
MERGE (u:User {id:row.userId})
MERGE (u)-[r:RATED]->(m)
SET r.rating = toFloat(row.rating),
r.timestamp = row.timestamp
} IN TRANSACTIONS OF 10000 ROWS
"""
graph.query(rating_query)
# Define fulltext indices
graph.query("CREATE FULLTEXT INDEX movie IF NOT EXISTS FOR (m:Movie) ON EACH [m.title]")
graph.query(
"CREATE FULLTEXT INDEX person IF NOT EXISTS FOR (p:Person) ON EACH [p.name]"
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
"""Tool for the Exa Search API."""
from typing import Dict, List, Optional, Union
from exa_py import Exa # type: ignore
from exa_py.api import HighlightsContentsOptions, TextContentsOptions # type: ignore
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.tools import BaseTool
from langchain_exa._utilities import initialize_client
class ExaSearchResults(BaseTool):
"""Tool that queries the Metaphor Search API and gets back json."""
name: str = "exa_search_results_json"
description: str = (
"A wrapper around Exa Search. "
"Input should be an Exa-optimized query. "
"Output is a JSON array of the query results"
)
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
values = initialize_client(values)
return values
def _run(
self,
query: str,
num_results: int,
text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.client.search_and_contents(
query,
num_results=num_results,
text=text_contents_options, # type: ignore
highlights=highlights, # type: ignore
include_domains=include_domains,
exclude_domains=exclude_domains,
start_crawl_date=start_crawl_date,
end_crawl_date=end_crawl_date,
start_published_date=start_published_date,
end_published_date=end_published_date,
use_autoprompt=use_autoprompt,
) # type: ignore
except Exception as e:
return repr(e)
class ExaFindSimilarResults(BaseTool):
"""Tool that queries the Metaphor Search API and gets back json."""
name: str = "exa_find_similar_results_json"
description: str = (
"A wrapper around Exa Find Similar. "
"Input should be an Exa-optimized query. "
"Output is a JSON array of the query results"
)
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
exa_base_url: Optional[str] = None
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate the environment."""
values = initialize_client(values)
return values
def _run(
self,
url: str,
num_results: int,
text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
exclude_source_domain: Optional[bool] = None,
category: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.client.find_similar_and_contents(
url,
num_results=num_results,
text=text_contents_options, # type: ignore
highlights=highlights, # type: ignore
include_domains=include_domains,
exclude_domains=exclude_domains,
start_crawl_date=start_crawl_date,
end_crawl_date=end_crawl_date,
start_published_date=start_published_date,
end_published_date=end_published_date,
exclude_source_domain=exclude_source_domain,
category=category,
) # type: ignore
except Exception as e:
return repr(e)
| [
"langchain_exa._utilities.initialize_client",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((796, 815), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (801, 815), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((845, 864), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (850, 864), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((871, 895), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (885, 895), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2818, 2837), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2823, 2837), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2867, 2886), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2872, 2886), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2932, 2956), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2946, 2956), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((1010, 1035), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (1027, 1035), False, 'from langchain_exa._utilities import initialize_client\n'), ((3071, 3096), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (3088, 3096), False, 'from langchain_exa._utilities import initialize_client\n')] |
from typing import Any, List, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
return super().__add__(other)
| [
"langchain_core.messages.base.merge_content",
"langchain_core.utils._merge.merge_dicts"
] | [((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (1579, 1628), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1702, 1762), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (1713, 1762), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1957, 1999), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1970, 1999), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((2035, 2095), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (2046, 2095), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((2169, 2229), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (2180, 2229), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from typing import Any, List, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
return super().__add__(other)
| [
"langchain_core.messages.base.merge_content",
"langchain_core.utils._merge.merge_dicts"
] | [((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (1579, 1628), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1702, 1762), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (1713, 1762), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1957, 1999), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1970, 1999), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((2035, 2095), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (2046, 2095), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((2169, 2229), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (2180, 2229), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from typing import Any, List, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
return super().__add__(other)
| [
"langchain_core.messages.base.merge_content",
"langchain_core.utils._merge.merge_dicts"
] | [((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (1579, 1628), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1702, 1762), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (1713, 1762), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1957, 1999), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1970, 1999), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((2035, 2095), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (2046, 2095), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((2169, 2229), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (2180, 2229), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from typing import Any, List, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
return super().__add__(other)
| [
"langchain_core.messages.base.merge_content",
"langchain_core.utils._merge.merge_dicts"
] | [((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (1579, 1628), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1702, 1762), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (1713, 1762), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((1957, 1999), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1970, 1999), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((2035, 2095), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (2046, 2095), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((2169, 2229), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (2180, 2229), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""An image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
def __init__(self, **kwargs: Any) -> None:
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & set(("url", "path", "detail"))
if overlap:
raise ValueError(
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return ImagePromptValue(image_url=self.format(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = v.format(**kwargs)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
path = kwargs.get("path") or formatted.get("path")
detail = kwargs.get("detail") or formatted.get("detail")
if not url and not path:
raise ValueError("Must provide either url or path.")
if not url:
if not isinstance(path, str):
raise ValueError("path must be a string.")
url = image_utils.image_to_data_url(path)
if not isinstance(url, str):
raise ValueError("url must be a string.")
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.utils.image.image_to_data_url"
] | [((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (2420, 2426), True, 'from langchain_core.utils import image as image_utils\n')] |
from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""An image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
def __init__(self, **kwargs: Any) -> None:
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & set(("url", "path", "detail"))
if overlap:
raise ValueError(
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return ImagePromptValue(image_url=self.format(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = v.format(**kwargs)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
path = kwargs.get("path") or formatted.get("path")
detail = kwargs.get("detail") or formatted.get("detail")
if not url and not path:
raise ValueError("Must provide either url or path.")
if not url:
if not isinstance(path, str):
raise ValueError("path must be a string.")
url = image_utils.image_to_data_url(path)
if not isinstance(url, str):
raise ValueError("url must be a string.")
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.utils.image.image_to_data_url"
] | [((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (2420, 2426), True, 'from langchain_core.utils import image as image_utils\n')] |
from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""An image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
def __init__(self, **kwargs: Any) -> None:
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & set(("url", "path", "detail"))
if overlap:
raise ValueError(
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return ImagePromptValue(image_url=self.format(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = v.format(**kwargs)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
path = kwargs.get("path") or formatted.get("path")
detail = kwargs.get("detail") or formatted.get("detail")
if not url and not path:
raise ValueError("Must provide either url or path.")
if not url:
if not isinstance(path, str):
raise ValueError("path must be a string.")
url = image_utils.image_to_data_url(path)
if not isinstance(url, str):
raise ValueError("url must be a string.")
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.utils.image.image_to_data_url"
] | [((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (2420, 2426), True, 'from langchain_core.utils import image as image_utils\n')] |
from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""An image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
def __init__(self, **kwargs: Any) -> None:
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & set(("url", "path", "detail"))
if overlap:
raise ValueError(
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return ImagePromptValue(image_url=self.format(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = v.format(**kwargs)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
path = kwargs.get("path") or formatted.get("path")
detail = kwargs.get("detail") or formatted.get("detail")
if not url and not path:
raise ValueError("Must provide either url or path.")
if not url:
if not isinstance(path, str):
raise ValueError("path must be a string.")
url = image_utils.image_to_data_url(path)
if not isinstance(url, str):
raise ValueError("url must be a string.")
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.utils.image.image_to_data_url"
] | [((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (2420, 2426), True, 'from langchain_core.utils import image as image_utils\n')] |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
import warnings
from typing import Any, Callable, Dict, Type
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.language_models.llms import BaseLLM
from langchain.utils.interactive_env import is_interactive_env
def _import_ai21() -> Any:
from langchain_community.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain_community.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain_community.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain_community.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain_community.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain_community.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain_community.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain_community.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain_community.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain_community.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain_community.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain_community.llms.databricks import Databricks
return Databricks
def _import_databricks_chat() -> Any:
from langchain_community.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Any:
from langchain_community.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain_community.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain_community.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain_community.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain_community.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Any:
from langchain_community.llms.mlflow import Mlflow
return Mlflow
def _import_mlflow_chat() -> Any:
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain_community.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain_community.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain_community.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain_community.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain_community.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain_community.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain_community.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain_community.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain_community.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain_community.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain_community.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain_community.llms.self_hosted_hugging_face import (
SelfHostedHuggingFaceLLM,
)
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain_community.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
return TitanTakeoffPro
def _import_together() -> Any:
from langchain_community.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain_community.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain_community.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain_community.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Any:
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_writer() -> Any:
from langchain_community.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain_community.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
def _import_volcengine_maas() -> Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
from langchain_community import llms
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing LLMs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.llms import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
if name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
return getattr(llms, name)
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"WatsonxLLM",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"VolcEngineMaasLLM": _import_volcengine_maas,
}
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')] |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
import warnings
from typing import Any, Callable, Dict, Type
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.language_models.llms import BaseLLM
from langchain.utils.interactive_env import is_interactive_env
def _import_ai21() -> Any:
from langchain_community.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain_community.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain_community.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain_community.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain_community.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain_community.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain_community.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain_community.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain_community.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain_community.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain_community.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain_community.llms.databricks import Databricks
return Databricks
def _import_databricks_chat() -> Any:
from langchain_community.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Any:
from langchain_community.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain_community.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain_community.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain_community.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain_community.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Any:
from langchain_community.llms.mlflow import Mlflow
return Mlflow
def _import_mlflow_chat() -> Any:
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain_community.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain_community.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain_community.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain_community.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain_community.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain_community.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain_community.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain_community.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain_community.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain_community.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain_community.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain_community.llms.self_hosted_hugging_face import (
SelfHostedHuggingFaceLLM,
)
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain_community.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
return TitanTakeoffPro
def _import_together() -> Any:
from langchain_community.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain_community.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain_community.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain_community.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Any:
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_writer() -> Any:
from langchain_community.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain_community.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
def _import_volcengine_maas() -> Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
from langchain_community import llms
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing LLMs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.llms import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
if name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
return getattr(llms, name)
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"WatsonxLLM",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"VolcEngineMaasLLM": _import_volcengine_maas,
}
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')] |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
import warnings
from typing import Any, Callable, Dict, Type
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.language_models.llms import BaseLLM
from langchain.utils.interactive_env import is_interactive_env
def _import_ai21() -> Any:
from langchain_community.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain_community.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain_community.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain_community.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain_community.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain_community.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain_community.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain_community.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain_community.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain_community.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain_community.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain_community.llms.databricks import Databricks
return Databricks
def _import_databricks_chat() -> Any:
from langchain_community.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Any:
from langchain_community.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain_community.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain_community.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain_community.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain_community.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Any:
from langchain_community.llms.mlflow import Mlflow
return Mlflow
def _import_mlflow_chat() -> Any:
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain_community.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain_community.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain_community.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain_community.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain_community.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain_community.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain_community.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain_community.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain_community.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain_community.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain_community.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain_community.llms.self_hosted_hugging_face import (
SelfHostedHuggingFaceLLM,
)
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain_community.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
return TitanTakeoffPro
def _import_together() -> Any:
from langchain_community.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain_community.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain_community.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain_community.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Any:
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_writer() -> Any:
from langchain_community.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain_community.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
def _import_volcengine_maas() -> Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
from langchain_community import llms
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing LLMs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.llms import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
if name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
return getattr(llms, name)
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"WatsonxLLM",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"VolcEngineMaasLLM": _import_volcengine_maas,
}
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')] |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
import warnings
from typing import Any, Callable, Dict, Type
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.language_models.llms import BaseLLM
from langchain.utils.interactive_env import is_interactive_env
def _import_ai21() -> Any:
from langchain_community.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain_community.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain_community.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain_community.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain_community.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain_community.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain_community.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain_community.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain_community.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain_community.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain_community.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain_community.llms.databricks import Databricks
return Databricks
def _import_databricks_chat() -> Any:
from langchain_community.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Any:
from langchain_community.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain_community.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain_community.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain_community.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain_community.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Any:
from langchain_community.llms.mlflow import Mlflow
return Mlflow
def _import_mlflow_chat() -> Any:
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain_community.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain_community.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain_community.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain_community.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain_community.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain_community.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain_community.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain_community.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain_community.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain_community.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain_community.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain_community.llms.self_hosted_hugging_face import (
SelfHostedHuggingFaceLLM,
)
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain_community.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
return TitanTakeoffPro
def _import_together() -> Any:
from langchain_community.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain_community.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain_community.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain_community.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Any:
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_writer() -> Any:
from langchain_community.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain_community.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
def _import_volcengine_maas() -> Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
from langchain_community import llms
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing LLMs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.llms import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
if name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
return getattr(llms, name)
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"WatsonxLLM",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"VolcEngineMaasLLM": _import_volcengine_maas,
}
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')] |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
@abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
def delete(self, key: str) -> None:
del self.store[key]
def exists(self, key: str) -> bool:
return key in self.store
def clear(self) -> None:
return self.store.clear()
class UpstashRedisEntityStore(BaseEntityStore):
"""Upstash Redis backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
def __init__(
self,
session_id: str = "default",
url: str = "",
token: str = "",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
def scan_and_delete(cursor: int) -> int:
cursor, keys_to_delete = self.redis_client.scan(
cursor, f"{self.full_key_prefix}:*"
)
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
conn: Any = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
With a swappable entity store, persisting entities across conversations.
Defaults to an in-memory entity store, and can be swapped out for a Redis,
SQLite, or other entity store.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity name can be found when calling this method, before the entity
summaries are generated, so the entity cache values may be empty if no entity
descriptions are generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
| [
"langchain_community.utilities.redis.get_client",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')] |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
@abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
def delete(self, key: str) -> None:
del self.store[key]
def exists(self, key: str) -> bool:
return key in self.store
def clear(self) -> None:
return self.store.clear()
class UpstashRedisEntityStore(BaseEntityStore):
"""Upstash Redis backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
def __init__(
self,
session_id: str = "default",
url: str = "",
token: str = "",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
def scan_and_delete(cursor: int) -> int:
cursor, keys_to_delete = self.redis_client.scan(
cursor, f"{self.full_key_prefix}:*"
)
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
conn: Any = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
With a swappable entity store, persisting entities across conversations.
Defaults to an in-memory entity store, and can be swapped out for a Redis,
SQLite, or other entity store.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity name can be found when calling this method, before the entity
summaries are generated, so the entity cache values may be empty if no entity
descriptions are generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
| [
"langchain_community.utilities.redis.get_client",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')] |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
@abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
def delete(self, key: str) -> None:
del self.store[key]
def exists(self, key: str) -> bool:
return key in self.store
def clear(self) -> None:
return self.store.clear()
class UpstashRedisEntityStore(BaseEntityStore):
"""Upstash Redis backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
def __init__(
self,
session_id: str = "default",
url: str = "",
token: str = "",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
def scan_and_delete(cursor: int) -> int:
cursor, keys_to_delete = self.redis_client.scan(
cursor, f"{self.full_key_prefix}:*"
)
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
conn: Any = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
With a swappable entity store, persisting entities across conversations.
Defaults to an in-memory entity store, and can be swapped out for a Redis,
SQLite, or other entity store.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity name can be found when calling this method, before the entity
summaries are generated, so the entity cache values may be empty if no entity
descriptions are generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
| [
"langchain_community.utilities.redis.get_client",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')] |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
@abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
def delete(self, key: str) -> None:
del self.store[key]
def exists(self, key: str) -> bool:
return key in self.store
def clear(self) -> None:
return self.store.clear()
class UpstashRedisEntityStore(BaseEntityStore):
"""Upstash Redis backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
def __init__(
self,
session_id: str = "default",
url: str = "",
token: str = "",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
def scan_and_delete(cursor: int) -> int:
cursor, keys_to_delete = self.redis_client.scan(
cursor, f"{self.full_key_prefix}:*"
)
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
conn: Any = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
With a swappable entity store, persisting entities across conversations.
Defaults to an in-memory entity store, and can be swapped out for a Redis,
SQLite, or other entity store.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity name can be found when calling this method, before the entity
summaries are generated, so the entity cache values may be empty if no entity
descriptions are generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
| [
"langchain_community.utilities.redis.get_client",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')] |
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
async def abuffer(self) -> Any:
"""String buffer of memory."""
return (
await self.abuffer_as_messages()
if self.return_messages
else await self.abuffer_as_str()
)
def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return self._buffer_as_str(self.chat_memory.messages)
async def abuffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = await self.chat_memory.aget_messages()
return self._buffer_as_str(messages)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
async def abuffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return await self.chat_memory.aget_messages()
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain."""
buffer = await self.abuffer()
return {self.memory_key: buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return self.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
async def asave_context(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
return self.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
async def aclear(self) -> None:
self.clear()
| [
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
async def abuffer(self) -> Any:
"""String buffer of memory."""
return (
await self.abuffer_as_messages()
if self.return_messages
else await self.abuffer_as_str()
)
def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return self._buffer_as_str(self.chat_memory.messages)
async def abuffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = await self.chat_memory.aget_messages()
return self._buffer_as_str(messages)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
async def abuffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return await self.chat_memory.aget_messages()
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain."""
buffer = await self.abuffer()
return {self.memory_key: buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return self.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
async def asave_context(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
return self.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
async def aclear(self) -> None:
self.clear()
| [
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
async def abuffer(self) -> Any:
"""String buffer of memory."""
return (
await self.abuffer_as_messages()
if self.return_messages
else await self.abuffer_as_str()
)
def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return self._buffer_as_str(self.chat_memory.messages)
async def abuffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = await self.chat_memory.aget_messages()
return self._buffer_as_str(messages)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
async def abuffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return await self.chat_memory.aget_messages()
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain."""
buffer = await self.abuffer()
return {self.memory_key: buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return self.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
async def asave_context(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
return self.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
async def aclear(self) -> None:
self.clear()
| [
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
async def abuffer(self) -> Any:
"""String buffer of memory."""
return (
await self.abuffer_as_messages()
if self.return_messages
else await self.abuffer_as_str()
)
def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return self._buffer_as_str(self.chat_memory.messages)
async def abuffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = await self.chat_memory.aget_messages()
return self._buffer_as_str(messages)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
async def abuffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return await self.chat_memory.aget_messages()
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain."""
buffer = await self.abuffer()
return {self.memory_key: buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return self.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
async def asave_context(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
return self.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
async def aclear(self) -> None:
self.clear()
| [
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')] |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Prompt image."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt as string."""
return self.image_url["url"]
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas."""
messages: Sequence[AnyMessage]
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
| [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.get_buffer_string"
] | [((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'self.text'}), '(content=self.text)\n', (1812, 1831), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((3085, 3111), 'typing.cast', 'cast', (['dict', 'self.image_url'], {}), '(dict, self.image_url)\n', (3089, 3111), False, 'from typing import List, Literal, Sequence, cast\n')] |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Prompt image."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt as string."""
return self.image_url["url"]
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas."""
messages: Sequence[AnyMessage]
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
| [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.get_buffer_string"
] | [((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'self.text'}), '(content=self.text)\n', (1812, 1831), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((3085, 3111), 'typing.cast', 'cast', (['dict', 'self.image_url'], {}), '(dict, self.image_url)\n', (3089, 3111), False, 'from typing import List, Literal, Sequence, cast\n')] |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Prompt image."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt as string."""
return self.image_url["url"]
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas."""
messages: Sequence[AnyMessage]
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
| [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.get_buffer_string"
] | [((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'self.text'}), '(content=self.text)\n', (1812, 1831), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((3085, 3111), 'typing.cast', 'cast', (['dict', 'self.image_url'], {}), '(dict, self.image_url)\n', (3089, 3111), False, 'from typing import List, Literal, Sequence, cast\n')] |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Prompt image."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt as string."""
return self.image_url["url"]
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas."""
messages: Sequence[AnyMessage]
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "chat"]
| [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.get_buffer_string"
] | [((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'self.text'}), '(content=self.text)\n', (1812, 1831), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((3085, 3111), 'typing.cast', 'cast', (['dict', 'self.image_url'], {}), '(dict, self.image_url)\n', (3089, 3111), False, 'from typing import List, Literal, Sequence, cast\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts
from langchain_core.utils.interactive_env import is_interactive_env
if TYPE_CHECKING:
from langchain_core.prompts.chat import ChatPromptTemplate
class BaseMessage(Serializable):
"""Base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: Union[str, List[Union[str, Dict]]]
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls."""
response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
type: str
name: Optional[str] = None
id: Optional[str] = None
class Config:
extra = Extra.allow
def __init__(
self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg."""
return super().__init__(content=content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain_core.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self]) # type: ignore[call-arg]
return prompt + other
def pretty_repr(self, html: bool = False) -> str:
title = get_msg_title_repr(self.type.title() + " Message", bold=html)
# TODO: handle non-string content.
if self.name is not None:
title += f"\nName: {self.name}"
return f"{title}\n\n{self.content}"
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
"""Merge two message contents.
Args:
first_content: The first content.
second_content: The second content.
Returns:
The merged content.
"""
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
# If both are lists, merge them naively
elif isinstance(second_content, List):
return first_content + second_content
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
class BaseMessageChunk(BaseMessage):
"""Message chunk, which can be concatenated with other Message chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
def message_to_dict(message: BaseMessage) -> dict:
"""Convert a Message to a dictionary.
Args:
message: Message to convert.
Returns:
Message as a dict.
"""
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
second_sep = sep + "=" if len(padded) % 2 else sep
if bold:
padded = get_bolded_text(padded)
return f"{sep}{padded}{second_sep}"
| [
"langchain_core.utils.get_bolded_text",
"langchain_core.utils.interactive_env.is_interactive_env",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.chat.ChatPromptTemplate",
"langchain_core.utils._merge.merge_dicts"
] | [((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (955, 977), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((1850, 1885), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (1868, 1885), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((5928, 5951), 'langchain_core.utils.get_bolded_text', 'get_bolded_text', (['padded'], {}), '(padded)\n', (5943, 5951), False, 'from langchain_core.utils import get_bolded_text\n'), ((2313, 2333), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2331, 2333), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((4467, 4527), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (4478, 4527), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((4601, 4661), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (4612, 4661), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts
from langchain_core.utils.interactive_env import is_interactive_env
if TYPE_CHECKING:
from langchain_core.prompts.chat import ChatPromptTemplate
class BaseMessage(Serializable):
"""Base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: Union[str, List[Union[str, Dict]]]
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls."""
response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
type: str
name: Optional[str] = None
id: Optional[str] = None
class Config:
extra = Extra.allow
def __init__(
self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg."""
return super().__init__(content=content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain_core.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self]) # type: ignore[call-arg]
return prompt + other
def pretty_repr(self, html: bool = False) -> str:
title = get_msg_title_repr(self.type.title() + " Message", bold=html)
# TODO: handle non-string content.
if self.name is not None:
title += f"\nName: {self.name}"
return f"{title}\n\n{self.content}"
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
"""Merge two message contents.
Args:
first_content: The first content.
second_content: The second content.
Returns:
The merged content.
"""
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
# If both are lists, merge them naively
elif isinstance(second_content, List):
return first_content + second_content
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
class BaseMessageChunk(BaseMessage):
"""Message chunk, which can be concatenated with other Message chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
def message_to_dict(message: BaseMessage) -> dict:
"""Convert a Message to a dictionary.
Args:
message: Message to convert.
Returns:
Message as a dict.
"""
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
second_sep = sep + "=" if len(padded) % 2 else sep
if bold:
padded = get_bolded_text(padded)
return f"{sep}{padded}{second_sep}"
| [
"langchain_core.utils.get_bolded_text",
"langchain_core.utils.interactive_env.is_interactive_env",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.chat.ChatPromptTemplate",
"langchain_core.utils._merge.merge_dicts"
] | [((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (955, 977), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((1850, 1885), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (1868, 1885), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((5928, 5951), 'langchain_core.utils.get_bolded_text', 'get_bolded_text', (['padded'], {}), '(padded)\n', (5943, 5951), False, 'from langchain_core.utils import get_bolded_text\n'), ((2313, 2333), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2331, 2333), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((4467, 4527), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (4478, 4527), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((4601, 4661), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (4612, 4661), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts
from langchain_core.utils.interactive_env import is_interactive_env
if TYPE_CHECKING:
from langchain_core.prompts.chat import ChatPromptTemplate
class BaseMessage(Serializable):
"""Base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: Union[str, List[Union[str, Dict]]]
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls."""
response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
type: str
name: Optional[str] = None
id: Optional[str] = None
class Config:
extra = Extra.allow
def __init__(
self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg."""
return super().__init__(content=content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain_core.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self]) # type: ignore[call-arg]
return prompt + other
def pretty_repr(self, html: bool = False) -> str:
title = get_msg_title_repr(self.type.title() + " Message", bold=html)
# TODO: handle non-string content.
if self.name is not None:
title += f"\nName: {self.name}"
return f"{title}\n\n{self.content}"
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
"""Merge two message contents.
Args:
first_content: The first content.
second_content: The second content.
Returns:
The merged content.
"""
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
# If both are lists, merge them naively
elif isinstance(second_content, List):
return first_content + second_content
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
class BaseMessageChunk(BaseMessage):
"""Message chunk, which can be concatenated with other Message chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
def message_to_dict(message: BaseMessage) -> dict:
"""Convert a Message to a dictionary.
Args:
message: Message to convert.
Returns:
Message as a dict.
"""
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
second_sep = sep + "=" if len(padded) % 2 else sep
if bold:
padded = get_bolded_text(padded)
return f"{sep}{padded}{second_sep}"
| [
"langchain_core.utils.get_bolded_text",
"langchain_core.utils.interactive_env.is_interactive_env",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.chat.ChatPromptTemplate",
"langchain_core.utils._merge.merge_dicts"
] | [((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (955, 977), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((1850, 1885), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (1868, 1885), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((5928, 5951), 'langchain_core.utils.get_bolded_text', 'get_bolded_text', (['padded'], {}), '(padded)\n', (5943, 5951), False, 'from langchain_core.utils import get_bolded_text\n'), ((2313, 2333), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2331, 2333), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((4467, 4527), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (4478, 4527), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((4601, 4661), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (4612, 4661), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts
from langchain_core.utils.interactive_env import is_interactive_env
if TYPE_CHECKING:
from langchain_core.prompts.chat import ChatPromptTemplate
class BaseMessage(Serializable):
"""Base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: Union[str, List[Union[str, Dict]]]
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls."""
response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
type: str
name: Optional[str] = None
id: Optional[str] = None
class Config:
extra = Extra.allow
def __init__(
self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg."""
return super().__init__(content=content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain_core.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self]) # type: ignore[call-arg]
return prompt + other
def pretty_repr(self, html: bool = False) -> str:
title = get_msg_title_repr(self.type.title() + " Message", bold=html)
# TODO: handle non-string content.
if self.name is not None:
title += f"\nName: {self.name}"
return f"{title}\n\n{self.content}"
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
"""Merge two message contents.
Args:
first_content: The first content.
second_content: The second content.
Returns:
The merged content.
"""
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
# If both are lists, merge them naively
elif isinstance(second_content, List):
return first_content + second_content
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
class BaseMessageChunk(BaseMessage):
"""Message chunk, which can be concatenated with other Message chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
def message_to_dict(message: BaseMessage) -> dict:
"""Convert a Message to a dictionary.
Args:
message: Message to convert.
Returns:
Message as a dict.
"""
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
second_sep = sep + "=" if len(padded) % 2 else sep
if bold:
padded = get_bolded_text(padded)
return f"{sep}{padded}{second_sep}"
| [
"langchain_core.utils.get_bolded_text",
"langchain_core.utils.interactive_env.is_interactive_env",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.chat.ChatPromptTemplate",
"langchain_core.utils._merge.merge_dicts"
] | [((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (955, 977), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((1850, 1885), 'langchain_core.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': '[self]'}), '(messages=[self])\n', (1868, 1885), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((5928, 5951), 'langchain_core.utils.get_bolded_text', 'get_bolded_text', (['padded'], {}), '(padded)\n', (5943, 5951), False, 'from langchain_core.utils import get_bolded_text\n'), ((2313, 2333), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2331, 2333), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((4467, 4527), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.additional_kwargs', 'other.additional_kwargs'], {}), '(self.additional_kwargs, other.additional_kwargs)\n', (4478, 4527), False, 'from langchain_core.utils._merge import merge_dicts\n'), ((4601, 4661), 'langchain_core.utils._merge.merge_dicts', 'merge_dicts', (['self.response_metadata', 'other.response_metadata'], {}), '(self.response_metadata, other.response_metadata)\n', (4612, 4661), False, 'from langchain_core.utils._merge import merge_dicts\n')] |
"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
LANGCHAINHUB_REPO = "https://raw.githubusercontent.com/hwchase17/langchain-hub/"
URL_BASE = os.environ.get(
"LANGCHAIN_HUB_URL_BASE",
LANGCHAINHUB_REPO + "{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
@deprecated(
since="0.1.30",
removal="0.2",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Please use "
"https://smith.langchain.com/hub instead."
),
)
def try_load_from_hub(
path: Union[str, Path],
loader: Callable[[str], T],
valid_prefix: str,
valid_suffixes: Set[str],
**kwargs: Any,
) -> Optional[T]:
"""Load configuration from hub. Returns None if path is not a hub path."""
if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
return None
ref, remote_path_str = match.groups()
ref = ref[1:] if ref else DEFAULT_REF
remote_path = Path(remote_path_str)
if remote_path.parts[0] != valid_prefix:
return None
if remote_path.suffix[1:] not in valid_suffixes:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Using Path with URLs is not recommended, because on Windows
# the backslash is used as the path separator, which can cause issues
# when working with URLs that use forward slashes as the path separator.
# Instead, use PurePosixPath to ensure that forward slashes are used as the
# path separator, regardless of the operating system.
full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
if not full_url.startswith(LANGCHAINHUB_REPO):
raise ValueError(f"Invalid hub path: {path}")
r = requests.get(full_url, timeout=5)
if r.status_code != 200:
raise ValueError(f"Could not find file at {full_url}")
with tempfile.TemporaryDirectory() as tmpdirname:
file = Path(tmpdirname) / remote_path.name
with open(file, "wb") as f:
f.write(r.content)
return loader(str(file), **kwargs)
| [
"langchain_core._api.deprecation.deprecated"
] | [((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGCHAIN_HUB_URL_BASE', LANGCHAINHUB_REPO + '{ref}/')\n", (490, 546), False, 'import os\n'), ((572, 619), 're.compile', 're.compile', (['"""lc(?P<ref>@[^:]+)?://(?P<path>.*)"""'], {}), "('lc(?P<ref>@[^:]+)?://(?P<path>.*)')\n", (582, 619), False, 'import re\n'), ((626, 638), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (633, 638), False, 'from typing import Any, Callable, Optional, Set, TypeVar, Union\n'), ((642, 822), 'langchain_core._api.deprecation.deprecated', 'deprecated', ([], {'since': '"""0.1.30"""', 'removal': '"""0.2"""', 'message': '"""Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead."""'}), "(since='0.1.30', removal='0.2', message=\n 'Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.'\n )\n", (652, 822), False, 'from langchain_core._api.deprecation import deprecated\n'), ((1317, 1338), 'pathlib.Path', 'Path', (['remote_path_str'], {}), '(remote_path_str)\n', (1321, 1338), False, 'from pathlib import Path, PurePosixPath\n'), ((2099, 2132), 'requests.get', 'requests.get', (['full_url'], {'timeout': '(5)'}), '(full_url, timeout=5)\n', (2111, 2132), False, 'import requests\n'), ((2234, 2263), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2261, 2263), False, 'import tempfile\n'), ((2294, 2310), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (2298, 2310), False, 'from pathlib import Path, PurePosixPath\n'), ((1947, 1973), 'pathlib.PurePosixPath', 'PurePosixPath', (['remote_path'], {}), '(remote_path)\n', (1960, 1973), False, 'from pathlib import Path, PurePosixPath\n')] |
"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
LANGCHAINHUB_REPO = "https://raw.githubusercontent.com/hwchase17/langchain-hub/"
URL_BASE = os.environ.get(
"LANGCHAIN_HUB_URL_BASE",
LANGCHAINHUB_REPO + "{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
@deprecated(
since="0.1.30",
removal="0.2",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Please use "
"https://smith.langchain.com/hub instead."
),
)
def try_load_from_hub(
path: Union[str, Path],
loader: Callable[[str], T],
valid_prefix: str,
valid_suffixes: Set[str],
**kwargs: Any,
) -> Optional[T]:
"""Load configuration from hub. Returns None if path is not a hub path."""
if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
return None
ref, remote_path_str = match.groups()
ref = ref[1:] if ref else DEFAULT_REF
remote_path = Path(remote_path_str)
if remote_path.parts[0] != valid_prefix:
return None
if remote_path.suffix[1:] not in valid_suffixes:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Using Path with URLs is not recommended, because on Windows
# the backslash is used as the path separator, which can cause issues
# when working with URLs that use forward slashes as the path separator.
# Instead, use PurePosixPath to ensure that forward slashes are used as the
# path separator, regardless of the operating system.
full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
if not full_url.startswith(LANGCHAINHUB_REPO):
raise ValueError(f"Invalid hub path: {path}")
r = requests.get(full_url, timeout=5)
if r.status_code != 200:
raise ValueError(f"Could not find file at {full_url}")
with tempfile.TemporaryDirectory() as tmpdirname:
file = Path(tmpdirname) / remote_path.name
with open(file, "wb") as f:
f.write(r.content)
return loader(str(file), **kwargs)
| [
"langchain_core._api.deprecation.deprecated"
] | [((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGCHAIN_HUB_URL_BASE', LANGCHAINHUB_REPO + '{ref}/')\n", (490, 546), False, 'import os\n'), ((572, 619), 're.compile', 're.compile', (['"""lc(?P<ref>@[^:]+)?://(?P<path>.*)"""'], {}), "('lc(?P<ref>@[^:]+)?://(?P<path>.*)')\n", (582, 619), False, 'import re\n'), ((626, 638), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (633, 638), False, 'from typing import Any, Callable, Optional, Set, TypeVar, Union\n'), ((642, 822), 'langchain_core._api.deprecation.deprecated', 'deprecated', ([], {'since': '"""0.1.30"""', 'removal': '"""0.2"""', 'message': '"""Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead."""'}), "(since='0.1.30', removal='0.2', message=\n 'Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.'\n )\n", (652, 822), False, 'from langchain_core._api.deprecation import deprecated\n'), ((1317, 1338), 'pathlib.Path', 'Path', (['remote_path_str'], {}), '(remote_path_str)\n', (1321, 1338), False, 'from pathlib import Path, PurePosixPath\n'), ((2099, 2132), 'requests.get', 'requests.get', (['full_url'], {'timeout': '(5)'}), '(full_url, timeout=5)\n', (2111, 2132), False, 'import requests\n'), ((2234, 2263), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2261, 2263), False, 'import tempfile\n'), ((2294, 2310), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (2298, 2310), False, 'from pathlib import Path, PurePosixPath\n'), ((1947, 1973), 'pathlib.PurePosixPath', 'PurePosixPath', (['remote_path'], {}), '(remote_path)\n', (1960, 1973), False, 'from pathlib import Path, PurePosixPath\n')] |
"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
LANGCHAINHUB_REPO = "https://raw.githubusercontent.com/hwchase17/langchain-hub/"
URL_BASE = os.environ.get(
"LANGCHAIN_HUB_URL_BASE",
LANGCHAINHUB_REPO + "{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
@deprecated(
since="0.1.30",
removal="0.2",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Please use "
"https://smith.langchain.com/hub instead."
),
)
def try_load_from_hub(
path: Union[str, Path],
loader: Callable[[str], T],
valid_prefix: str,
valid_suffixes: Set[str],
**kwargs: Any,
) -> Optional[T]:
"""Load configuration from hub. Returns None if path is not a hub path."""
if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
return None
ref, remote_path_str = match.groups()
ref = ref[1:] if ref else DEFAULT_REF
remote_path = Path(remote_path_str)
if remote_path.parts[0] != valid_prefix:
return None
if remote_path.suffix[1:] not in valid_suffixes:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Using Path with URLs is not recommended, because on Windows
# the backslash is used as the path separator, which can cause issues
# when working with URLs that use forward slashes as the path separator.
# Instead, use PurePosixPath to ensure that forward slashes are used as the
# path separator, regardless of the operating system.
full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
if not full_url.startswith(LANGCHAINHUB_REPO):
raise ValueError(f"Invalid hub path: {path}")
r = requests.get(full_url, timeout=5)
if r.status_code != 200:
raise ValueError(f"Could not find file at {full_url}")
with tempfile.TemporaryDirectory() as tmpdirname:
file = Path(tmpdirname) / remote_path.name
with open(file, "wb") as f:
f.write(r.content)
return loader(str(file), **kwargs)
| [
"langchain_core._api.deprecation.deprecated"
] | [((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGCHAIN_HUB_URL_BASE', LANGCHAINHUB_REPO + '{ref}/')\n", (490, 546), False, 'import os\n'), ((572, 619), 're.compile', 're.compile', (['"""lc(?P<ref>@[^:]+)?://(?P<path>.*)"""'], {}), "('lc(?P<ref>@[^:]+)?://(?P<path>.*)')\n", (582, 619), False, 'import re\n'), ((626, 638), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (633, 638), False, 'from typing import Any, Callable, Optional, Set, TypeVar, Union\n'), ((642, 822), 'langchain_core._api.deprecation.deprecated', 'deprecated', ([], {'since': '"""0.1.30"""', 'removal': '"""0.2"""', 'message': '"""Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead."""'}), "(since='0.1.30', removal='0.2', message=\n 'Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.'\n )\n", (652, 822), False, 'from langchain_core._api.deprecation import deprecated\n'), ((1317, 1338), 'pathlib.Path', 'Path', (['remote_path_str'], {}), '(remote_path_str)\n', (1321, 1338), False, 'from pathlib import Path, PurePosixPath\n'), ((2099, 2132), 'requests.get', 'requests.get', (['full_url'], {'timeout': '(5)'}), '(full_url, timeout=5)\n', (2111, 2132), False, 'import requests\n'), ((2234, 2263), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2261, 2263), False, 'import tempfile\n'), ((2294, 2310), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (2298, 2310), False, 'from pathlib import Path, PurePosixPath\n'), ((1947, 1973), 'pathlib.PurePosixPath', 'PurePosixPath', (['remote_path'], {}), '(remote_path)\n', (1960, 1973), False, 'from pathlib import Path, PurePosixPath\n')] |
"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
LANGCHAINHUB_REPO = "https://raw.githubusercontent.com/hwchase17/langchain-hub/"
URL_BASE = os.environ.get(
"LANGCHAIN_HUB_URL_BASE",
LANGCHAINHUB_REPO + "{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
@deprecated(
since="0.1.30",
removal="0.2",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Please use "
"https://smith.langchain.com/hub instead."
),
)
def try_load_from_hub(
path: Union[str, Path],
loader: Callable[[str], T],
valid_prefix: str,
valid_suffixes: Set[str],
**kwargs: Any,
) -> Optional[T]:
"""Load configuration from hub. Returns None if path is not a hub path."""
if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
return None
ref, remote_path_str = match.groups()
ref = ref[1:] if ref else DEFAULT_REF
remote_path = Path(remote_path_str)
if remote_path.parts[0] != valid_prefix:
return None
if remote_path.suffix[1:] not in valid_suffixes:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Using Path with URLs is not recommended, because on Windows
# the backslash is used as the path separator, which can cause issues
# when working with URLs that use forward slashes as the path separator.
# Instead, use PurePosixPath to ensure that forward slashes are used as the
# path separator, regardless of the operating system.
full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
if not full_url.startswith(LANGCHAINHUB_REPO):
raise ValueError(f"Invalid hub path: {path}")
r = requests.get(full_url, timeout=5)
if r.status_code != 200:
raise ValueError(f"Could not find file at {full_url}")
with tempfile.TemporaryDirectory() as tmpdirname:
file = Path(tmpdirname) / remote_path.name
with open(file, "wb") as f:
f.write(r.content)
return loader(str(file), **kwargs)
| [
"langchain_core._api.deprecation.deprecated"
] | [((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGCHAIN_HUB_URL_BASE', LANGCHAINHUB_REPO + '{ref}/')\n", (490, 546), False, 'import os\n'), ((572, 619), 're.compile', 're.compile', (['"""lc(?P<ref>@[^:]+)?://(?P<path>.*)"""'], {}), "('lc(?P<ref>@[^:]+)?://(?P<path>.*)')\n", (582, 619), False, 'import re\n'), ((626, 638), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (633, 638), False, 'from typing import Any, Callable, Optional, Set, TypeVar, Union\n'), ((642, 822), 'langchain_core._api.deprecation.deprecated', 'deprecated', ([], {'since': '"""0.1.30"""', 'removal': '"""0.2"""', 'message': '"""Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead."""'}), "(since='0.1.30', removal='0.2', message=\n 'Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.'\n )\n", (652, 822), False, 'from langchain_core._api.deprecation import deprecated\n'), ((1317, 1338), 'pathlib.Path', 'Path', (['remote_path_str'], {}), '(remote_path_str)\n', (1321, 1338), False, 'from pathlib import Path, PurePosixPath\n'), ((2099, 2132), 'requests.get', 'requests.get', (['full_url'], {'timeout': '(5)'}), '(full_url, timeout=5)\n', (2111, 2132), False, 'import requests\n'), ((2234, 2263), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2261, 2263), False, 'import tempfile\n'), ((2294, 2310), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (2298, 2310), False, 'from pathlib import Path, PurePosixPath\n'), ((1947, 1973), 'pathlib.PurePosixPath', 'PurePosixPath', (['remote_path'], {}), '(remote_path)\n', (1960, 1973), False, 'from pathlib import Path, PurePosixPath\n')] |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.loading import try_load_from_hub
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="0.2.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="0.2.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
| [
"langchain_core._api.deprecated",
"langchain_core.utils.loading.try_load_from_hub"
] | [((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api import deprecated\n'), ((3172, 3208), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (3182, 3208), False, 'from langchain_core._api import deprecated\n'), ((3632, 3704), 'langchain_core.utils.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_agent_from_file', '"""agents"""', 'valid_suffixes'], {}), "(path, _load_agent_from_file, 'agents', valid_suffixes)\n", (3649, 3704), False, 'from langchain_core.utils.loading import try_load_from_hub\n'), ((4092, 4102), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (4096, 4102), False, 'from pathlib import Path\n'), ((4270, 4282), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4279, 4282), False, 'import json\n'), ((4385, 4402), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4399, 4402), False, 'import yaml\n')] |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.loading import try_load_from_hub
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="0.2.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="0.2.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
| [
"langchain_core._api.deprecated",
"langchain_core.utils.loading.try_load_from_hub"
] | [((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api import deprecated\n'), ((3172, 3208), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (3182, 3208), False, 'from langchain_core._api import deprecated\n'), ((3632, 3704), 'langchain_core.utils.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_agent_from_file', '"""agents"""', 'valid_suffixes'], {}), "(path, _load_agent_from_file, 'agents', valid_suffixes)\n", (3649, 3704), False, 'from langchain_core.utils.loading import try_load_from_hub\n'), ((4092, 4102), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (4096, 4102), False, 'from pathlib import Path\n'), ((4270, 4282), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4279, 4282), False, 'import json\n'), ((4385, 4402), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4399, 4402), False, 'import yaml\n')] |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.loading import try_load_from_hub
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="0.2.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="0.2.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
| [
"langchain_core._api.deprecated",
"langchain_core.utils.loading.try_load_from_hub"
] | [((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api import deprecated\n'), ((3172, 3208), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (3182, 3208), False, 'from langchain_core._api import deprecated\n'), ((3632, 3704), 'langchain_core.utils.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_agent_from_file', '"""agents"""', 'valid_suffixes'], {}), "(path, _load_agent_from_file, 'agents', valid_suffixes)\n", (3649, 3704), False, 'from langchain_core.utils.loading import try_load_from_hub\n'), ((4092, 4102), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (4096, 4102), False, 'from pathlib import Path\n'), ((4270, 4282), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4279, 4282), False, 'import json\n'), ((4385, 4402), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4399, 4402), False, 'import yaml\n')] |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.loading import try_load_from_hub
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="0.2.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="0.2.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
| [
"langchain_core._api.deprecated",
"langchain_core.utils.loading.try_load_from_hub"
] | [((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api import deprecated\n'), ((3172, 3208), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (3182, 3208), False, 'from langchain_core._api import deprecated\n'), ((3632, 3704), 'langchain_core.utils.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_agent_from_file', '"""agents"""', 'valid_suffixes'], {}), "(path, _load_agent_from_file, 'agents', valid_suffixes)\n", (3649, 3704), False, 'from langchain_core.utils.loading import try_load_from_hub\n'), ((4092, 4102), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (4096, 4102), False, 'from pathlib import Path\n'), ((4270, 4282), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4279, 4282), False, 'import json\n'), ((4385, 4402), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4399, 4402), False, 'import yaml\n')] |
"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.utils import get_colored_text
from langchain_core.utils.formatting import formatter
from langchain_core.utils.interactive_env import is_interactive_env
def jinja2_formatter(template: str, **kwargs: Any) -> str:
"""Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
"""
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. "
"Do not expand jinja2 templates using unverified or user-controlled "
"inputs as that can result in arbitrary Python code execution."
)
# This uses a sandboxed environment to prevent arbitrary code execution.
# Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
# Please treat this sand-boxing as a best-effort approach rather than
# a guarantee of security.
# We recommend to never use jinja2 templates with untrusted inputs.
# https://jinja.palletsprojects.com/en/3.1.x/sandbox/
# approach not a guarantee of security.
return SandboxedEnvironment().from_string(template).render(**kwargs)
def validate_jinja2(template: str, input_variables: List[str]) -> None:
"""
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ""
if missing_variables:
warning_message += f"Missing variables: {missing_variables} "
if extra_variables:
warning_message += f"Extra variables: {extra_variables}"
if warning_message:
warnings.warn(warning_message.strip())
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
try:
from jinja2 import Environment, meta
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
)
env = Environment()
ast = env.parse(template)
variables = meta.find_undeclared_variables(ast)
return variables
DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
"f-string": formatter.format,
"jinja2": jinja2_formatter,
}
DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
"f-string": formatter.validate_input_variables,
"jinja2": validate_jinja2,
}
def check_valid_template(
template: str, template_format: str, input_variables: List[str]
) -> None:
"""Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f"Invalid template format {template_format!r}, should be one of"
f" {list(DEFAULT_FORMATTER_MAPPING)}."
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
"Invalid prompt schema; check for mismatched or missing input parameters"
f" from {input_variables}."
) from exc
def get_template_variables(template: str, template_format: str) -> List[str]:
"""Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == "jinja2":
# Get the variables for the template
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == "f-string":
input_variables = {
v for _, v, _, _ in Formatter().parse(template) if v is not None
}
else:
raise ValueError(f"Unsupported template format: {template_format}")
return sorted(input_variables)
class StringPromptTemplate(BasePromptTemplate, ABC):
"""String prompt that exposes the format method, returning a prompt."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return StringPromptValue(text=self.format(**kwargs))
def pretty_repr(self, html: bool = False) -> str:
# TODO: handle partials
dummy_vars = {
input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
}
if html:
dummy_vars = {
k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
}
return self.format(**dummy_vars)
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
| [
"langchain_core.utils.get_colored_text",
"langchain_core.utils.interactive_env.is_interactive_env"
] | [((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((6074, 6103), 'langchain_core.utils.get_colored_text', 'get_colored_text', (['v', '"""yellow"""'], {}), "(v, 'yellow')\n", (6090, 6103), False, 'from langchain_core.utils import get_colored_text\n'), ((1946, 1968), 'jinja2.sandbox.SandboxedEnvironment', 'SandboxedEnvironment', ([], {}), '()\n', (1966, 1968), False, 'from jinja2.sandbox import SandboxedEnvironment\n'), ((6263, 6283), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (6281, 6283), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((5171, 5182), 'string.Formatter', 'Formatter', ([], {}), '()\n', (5180, 5182), False, 'from string import Formatter\n')] |
"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.utils import get_colored_text
from langchain_core.utils.formatting import formatter
from langchain_core.utils.interactive_env import is_interactive_env
def jinja2_formatter(template: str, **kwargs: Any) -> str:
"""Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
"""
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. "
"Do not expand jinja2 templates using unverified or user-controlled "
"inputs as that can result in arbitrary Python code execution."
)
# This uses a sandboxed environment to prevent arbitrary code execution.
# Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
# Please treat this sand-boxing as a best-effort approach rather than
# a guarantee of security.
# We recommend to never use jinja2 templates with untrusted inputs.
# https://jinja.palletsprojects.com/en/3.1.x/sandbox/
# approach not a guarantee of security.
return SandboxedEnvironment().from_string(template).render(**kwargs)
def validate_jinja2(template: str, input_variables: List[str]) -> None:
"""
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ""
if missing_variables:
warning_message += f"Missing variables: {missing_variables} "
if extra_variables:
warning_message += f"Extra variables: {extra_variables}"
if warning_message:
warnings.warn(warning_message.strip())
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
try:
from jinja2 import Environment, meta
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
)
env = Environment()
ast = env.parse(template)
variables = meta.find_undeclared_variables(ast)
return variables
DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
"f-string": formatter.format,
"jinja2": jinja2_formatter,
}
DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
"f-string": formatter.validate_input_variables,
"jinja2": validate_jinja2,
}
def check_valid_template(
template: str, template_format: str, input_variables: List[str]
) -> None:
"""Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f"Invalid template format {template_format!r}, should be one of"
f" {list(DEFAULT_FORMATTER_MAPPING)}."
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
"Invalid prompt schema; check for mismatched or missing input parameters"
f" from {input_variables}."
) from exc
def get_template_variables(template: str, template_format: str) -> List[str]:
"""Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == "jinja2":
# Get the variables for the template
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == "f-string":
input_variables = {
v for _, v, _, _ in Formatter().parse(template) if v is not None
}
else:
raise ValueError(f"Unsupported template format: {template_format}")
return sorted(input_variables)
class StringPromptTemplate(BasePromptTemplate, ABC):
"""String prompt that exposes the format method, returning a prompt."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return StringPromptValue(text=self.format(**kwargs))
def pretty_repr(self, html: bool = False) -> str:
# TODO: handle partials
dummy_vars = {
input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
}
if html:
dummy_vars = {
k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
}
return self.format(**dummy_vars)
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
| [
"langchain_core.utils.get_colored_text",
"langchain_core.utils.interactive_env.is_interactive_env"
] | [((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((6074, 6103), 'langchain_core.utils.get_colored_text', 'get_colored_text', (['v', '"""yellow"""'], {}), "(v, 'yellow')\n", (6090, 6103), False, 'from langchain_core.utils import get_colored_text\n'), ((1946, 1968), 'jinja2.sandbox.SandboxedEnvironment', 'SandboxedEnvironment', ([], {}), '()\n', (1966, 1968), False, 'from jinja2.sandbox import SandboxedEnvironment\n'), ((6263, 6283), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (6281, 6283), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((5171, 5182), 'string.Formatter', 'Formatter', ([], {}), '()\n', (5180, 5182), False, 'from string import Formatter\n')] |
"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.utils import get_colored_text
from langchain_core.utils.formatting import formatter
from langchain_core.utils.interactive_env import is_interactive_env
def jinja2_formatter(template: str, **kwargs: Any) -> str:
"""Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
"""
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. "
"Do not expand jinja2 templates using unverified or user-controlled "
"inputs as that can result in arbitrary Python code execution."
)
# This uses a sandboxed environment to prevent arbitrary code execution.
# Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
# Please treat this sand-boxing as a best-effort approach rather than
# a guarantee of security.
# We recommend to never use jinja2 templates with untrusted inputs.
# https://jinja.palletsprojects.com/en/3.1.x/sandbox/
# approach not a guarantee of security.
return SandboxedEnvironment().from_string(template).render(**kwargs)
def validate_jinja2(template: str, input_variables: List[str]) -> None:
"""
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ""
if missing_variables:
warning_message += f"Missing variables: {missing_variables} "
if extra_variables:
warning_message += f"Extra variables: {extra_variables}"
if warning_message:
warnings.warn(warning_message.strip())
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
try:
from jinja2 import Environment, meta
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
)
env = Environment()
ast = env.parse(template)
variables = meta.find_undeclared_variables(ast)
return variables
DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
"f-string": formatter.format,
"jinja2": jinja2_formatter,
}
DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
"f-string": formatter.validate_input_variables,
"jinja2": validate_jinja2,
}
def check_valid_template(
template: str, template_format: str, input_variables: List[str]
) -> None:
"""Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f"Invalid template format {template_format!r}, should be one of"
f" {list(DEFAULT_FORMATTER_MAPPING)}."
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
"Invalid prompt schema; check for mismatched or missing input parameters"
f" from {input_variables}."
) from exc
def get_template_variables(template: str, template_format: str) -> List[str]:
"""Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == "jinja2":
# Get the variables for the template
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == "f-string":
input_variables = {
v for _, v, _, _ in Formatter().parse(template) if v is not None
}
else:
raise ValueError(f"Unsupported template format: {template_format}")
return sorted(input_variables)
class StringPromptTemplate(BasePromptTemplate, ABC):
"""String prompt that exposes the format method, returning a prompt."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return StringPromptValue(text=self.format(**kwargs))
def pretty_repr(self, html: bool = False) -> str:
# TODO: handle partials
dummy_vars = {
input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
}
if html:
dummy_vars = {
k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
}
return self.format(**dummy_vars)
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
| [
"langchain_core.utils.get_colored_text",
"langchain_core.utils.interactive_env.is_interactive_env"
] | [((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((6074, 6103), 'langchain_core.utils.get_colored_text', 'get_colored_text', (['v', '"""yellow"""'], {}), "(v, 'yellow')\n", (6090, 6103), False, 'from langchain_core.utils import get_colored_text\n'), ((1946, 1968), 'jinja2.sandbox.SandboxedEnvironment', 'SandboxedEnvironment', ([], {}), '()\n', (1966, 1968), False, 'from jinja2.sandbox import SandboxedEnvironment\n'), ((6263, 6283), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (6281, 6283), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((5171, 5182), 'string.Formatter', 'Formatter', ([], {}), '()\n', (5180, 5182), False, 'from string import Formatter\n')] |
"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.utils import get_colored_text
from langchain_core.utils.formatting import formatter
from langchain_core.utils.interactive_env import is_interactive_env
def jinja2_formatter(template: str, **kwargs: Any) -> str:
"""Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
"""
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. "
"Do not expand jinja2 templates using unverified or user-controlled "
"inputs as that can result in arbitrary Python code execution."
)
# This uses a sandboxed environment to prevent arbitrary code execution.
# Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
# Please treat this sand-boxing as a best-effort approach rather than
# a guarantee of security.
# We recommend to never use jinja2 templates with untrusted inputs.
# https://jinja.palletsprojects.com/en/3.1.x/sandbox/
# approach not a guarantee of security.
return SandboxedEnvironment().from_string(template).render(**kwargs)
def validate_jinja2(template: str, input_variables: List[str]) -> None:
"""
Validate that the input variables are valid for the template.
Issues a warning if missing or extra variables are found.
Args:
template: The template string.
input_variables: The input variables.
"""
input_variables_set = set(input_variables)
valid_variables = _get_jinja2_variables_from_template(template)
missing_variables = valid_variables - input_variables_set
extra_variables = input_variables_set - valid_variables
warning_message = ""
if missing_variables:
warning_message += f"Missing variables: {missing_variables} "
if extra_variables:
warning_message += f"Extra variables: {extra_variables}"
if warning_message:
warnings.warn(warning_message.strip())
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
try:
from jinja2 import Environment, meta
except ImportError:
raise ImportError(
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
)
env = Environment()
ast = env.parse(template)
variables = meta.find_undeclared_variables(ast)
return variables
DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
"f-string": formatter.format,
"jinja2": jinja2_formatter,
}
DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
"f-string": formatter.validate_input_variables,
"jinja2": validate_jinja2,
}
def check_valid_template(
template: str, template_format: str, input_variables: List[str]
) -> None:
"""Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f"Invalid template format {template_format!r}, should be one of"
f" {list(DEFAULT_FORMATTER_MAPPING)}."
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
"Invalid prompt schema; check for mismatched or missing input parameters"
f" from {input_variables}."
) from exc
def get_template_variables(template: str, template_format: str) -> List[str]:
"""Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == "jinja2":
# Get the variables for the template
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == "f-string":
input_variables = {
v for _, v, _, _ in Formatter().parse(template) if v is not None
}
else:
raise ValueError(f"Unsupported template format: {template_format}")
return sorted(input_variables)
class StringPromptTemplate(BasePromptTemplate, ABC):
"""String prompt that exposes the format method, returning a prompt."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "base"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Create Chat Messages."""
return StringPromptValue(text=self.format(**kwargs))
def pretty_repr(self, html: bool = False) -> str:
# TODO: handle partials
dummy_vars = {
input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
}
if html:
dummy_vars = {
k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
}
return self.format(**dummy_vars)
def pretty_print(self) -> None:
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
| [
"langchain_core.utils.get_colored_text",
"langchain_core.utils.interactive_env.is_interactive_env"
] | [((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((6074, 6103), 'langchain_core.utils.get_colored_text', 'get_colored_text', (['v', '"""yellow"""'], {}), "(v, 'yellow')\n", (6090, 6103), False, 'from langchain_core.utils import get_colored_text\n'), ((1946, 1968), 'jinja2.sandbox.SandboxedEnvironment', 'SandboxedEnvironment', ([], {}), '()\n', (1966, 1968), False, 'from jinja2.sandbox import SandboxedEnvironment\n'), ((6263, 6283), 'langchain_core.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (6281, 6283), False, 'from langchain_core.utils.interactive_env import is_interactive_env\n'), ((5171, 5182), 'string.Formatter', 'Formatter', ([], {}), '()\n', (5180, 5182), False, 'from string import Formatter\n')] |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain.utils.interactive_env import is_interactive_env
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_python_tool_PythonAstREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_python_tool_PythonREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def __getattr__(name: str) -> Any:
if name == "PythonAstREPLTool":
return _import_python_tool_PythonAstREPLTool()
elif name == "PythonREPLTool":
return _import_python_tool_PythonREPLTool()
else:
from langchain_community import tools
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing tools from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.tools import {name}`.\n\n"
"To install langchain-community run "
"`pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(tools, name)
__all__ = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"SearchAPIResults",
"SearchAPIRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MerriamWebsterQueryRun",
"MetaphorSearchResults",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SteamWebAPIQueryRun",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"format_tool_to_openai_function",
"tool",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')] |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain.utils.interactive_env import is_interactive_env
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_python_tool_PythonAstREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_python_tool_PythonREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def __getattr__(name: str) -> Any:
if name == "PythonAstREPLTool":
return _import_python_tool_PythonAstREPLTool()
elif name == "PythonREPLTool":
return _import_python_tool_PythonREPLTool()
else:
from langchain_community import tools
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing tools from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.tools import {name}`.\n\n"
"To install langchain-community run "
"`pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(tools, name)
__all__ = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"SearchAPIResults",
"SearchAPIRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MerriamWebsterQueryRun",
"MetaphorSearchResults",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SteamWebAPIQueryRun",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"format_tool_to_openai_function",
"tool",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')] |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain.utils.interactive_env import is_interactive_env
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_python_tool_PythonAstREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_python_tool_PythonREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def __getattr__(name: str) -> Any:
if name == "PythonAstREPLTool":
return _import_python_tool_PythonAstREPLTool()
elif name == "PythonREPLTool":
return _import_python_tool_PythonREPLTool()
else:
from langchain_community import tools
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing tools from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.tools import {name}`.\n\n"
"To install langchain-community run "
"`pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(tools, name)
__all__ = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"SearchAPIResults",
"SearchAPIRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MerriamWebsterQueryRun",
"MetaphorSearchResults",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SteamWebAPIQueryRun",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"format_tool_to_openai_function",
"tool",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')] |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain.utils.interactive_env import is_interactive_env
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_python_tool_PythonAstREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_python_tool_PythonREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def __getattr__(name: str) -> Any:
if name == "PythonAstREPLTool":
return _import_python_tool_PythonAstREPLTool()
elif name == "PythonREPLTool":
return _import_python_tool_PythonREPLTool()
else:
from langchain_community import tools
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing tools from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.tools import {name}`.\n\n"
"To install langchain-community run "
"`pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(tools, name)
__all__ = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"SearchAPIResults",
"SearchAPIRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MerriamWebsterQueryRun",
"MetaphorSearchResults",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SteamWebAPIQueryRun",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"format_tool_to_openai_function",
"tool",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')] |
from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text."""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
type: Literal["Document"] = "Document"
def __init__(self, page_content: str, **kwargs: Any) -> None:
"""Pass page_content in as positional or named arg."""
super().__init__(page_content=page_content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "document"]
| [
"langchain_core.pydantic_v1.Field"
] | [((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] |
from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text."""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
type: Literal["Document"] = "Document"
def __init__(self, page_content: str, **kwargs: Any) -> None:
"""Pass page_content in as positional or named arg."""
super().__init__(page_content=page_content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "document"]
| [
"langchain_core.pydantic_v1.Field"
] | [((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] |
from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text."""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
type: Literal["Document"] = "Document"
def __init__(self, page_content: str, **kwargs: Any) -> None:
"""Pass page_content in as positional or named arg."""
super().__init__(page_content=page_content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "document"]
| [
"langchain_core.pydantic_v1.Field"
] | [((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] |
from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text."""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
type: Literal["Document"] = "Document"
def __init__(self, page_content: str, **kwargs: Any) -> None:
"""Pass page_content in as positional or named arg."""
super().__init__(page_content=page_content, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "document"]
| [
"langchain_core.pydantic_v1.Field"
] | [((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] |
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import deprecated
from langchain_core.outputs import LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@deprecated("0.1.0", alternative="Use string instead.", removal="0.2.0")
def RunTypeEnum() -> Type[RunTypeEnumDep]:
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Base(BaseModel):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="0.2.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class BaseRun(BaseModel):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: Optional[Dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: Dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: List[str]
response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = None
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
# Begin V2 API Schemas
class Run(BaseRunV2):
"""Run schema for the V2 API in the Tracer."""
execution_order: int
child_execution_order: int
child_runs: List[Run] = Field(default_factory=list)
tags: Optional[List[str]] = Field(default_factory=list)
events: List[Dict[str, Any]] = Field(default_factory=list)
trace_id: Optional[UUID] = None
dotted_order: Optional[str] = None
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("events") is None:
values["events"] = []
return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
Run.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
| [
"langchain_core._api.deprecated",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (791, 817), False, 'from langchain_core._api import deprecated\n'), ((1060, 1096), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1070, 1096), False, 'from langchain_core._api import deprecated\n'), ((1194, 1230), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1204, 1230), False, 'from langchain_core._api import deprecated\n'), ((1325, 1361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1335, 1361), False, 'from langchain_core._api import deprecated\n'), ((1472, 1508), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1482, 1508), False, 'from langchain_core._api import deprecated\n'), ((1615, 1670), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (1625, 1670), False, 'from langchain_core._api import deprecated\n'), ((2131, 2186), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2141, 2186), False, 'from langchain_core._api import deprecated\n'), ((2306, 2361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2316, 2361), False, 'from langchain_core._api import deprecated\n'), ((2688, 2743), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2698, 2743), False, 'from langchain_core._api import deprecated\n'), ((586, 727), 'warnings.warn', 'warnings.warn', (['"""RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."""', 'DeprecationWarning'], {}), '(\n "RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."\n , DeprecationWarning)\n', (599, 727), False, 'import warnings\n'), ((935, 982), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (940, 982), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1816, 1863), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1821, 1863), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1898, 1945), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1903, 1945), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2525, 2552), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2530, 2552), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2592, 2619), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2597, 2619), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2657, 2684), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2662, 2684), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2902, 2929), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2907, 2929), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2969, 2996), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2974, 2996), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3034, 3061), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3039, 3061), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3247, 3274), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3252, 3274), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3307, 3334), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3312, 3334), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3370, 3397), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3375, 3397), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3479, 3503), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3493, 3503), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n')] |
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import deprecated
from langchain_core.outputs import LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@deprecated("0.1.0", alternative="Use string instead.", removal="0.2.0")
def RunTypeEnum() -> Type[RunTypeEnumDep]:
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Base(BaseModel):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="0.2.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class BaseRun(BaseModel):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: Optional[Dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: Dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: List[str]
response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = None
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
# Begin V2 API Schemas
class Run(BaseRunV2):
"""Run schema for the V2 API in the Tracer."""
execution_order: int
child_execution_order: int
child_runs: List[Run] = Field(default_factory=list)
tags: Optional[List[str]] = Field(default_factory=list)
events: List[Dict[str, Any]] = Field(default_factory=list)
trace_id: Optional[UUID] = None
dotted_order: Optional[str] = None
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("events") is None:
values["events"] = []
return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
Run.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
| [
"langchain_core._api.deprecated",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (791, 817), False, 'from langchain_core._api import deprecated\n'), ((1060, 1096), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1070, 1096), False, 'from langchain_core._api import deprecated\n'), ((1194, 1230), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1204, 1230), False, 'from langchain_core._api import deprecated\n'), ((1325, 1361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1335, 1361), False, 'from langchain_core._api import deprecated\n'), ((1472, 1508), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1482, 1508), False, 'from langchain_core._api import deprecated\n'), ((1615, 1670), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (1625, 1670), False, 'from langchain_core._api import deprecated\n'), ((2131, 2186), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2141, 2186), False, 'from langchain_core._api import deprecated\n'), ((2306, 2361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2316, 2361), False, 'from langchain_core._api import deprecated\n'), ((2688, 2743), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2698, 2743), False, 'from langchain_core._api import deprecated\n'), ((586, 727), 'warnings.warn', 'warnings.warn', (['"""RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."""', 'DeprecationWarning'], {}), '(\n "RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."\n , DeprecationWarning)\n', (599, 727), False, 'import warnings\n'), ((935, 982), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (940, 982), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1816, 1863), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1821, 1863), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1898, 1945), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1903, 1945), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2525, 2552), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2530, 2552), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2592, 2619), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2597, 2619), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2657, 2684), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2662, 2684), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2902, 2929), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2907, 2929), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2969, 2996), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2974, 2996), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3034, 3061), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3039, 3061), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3247, 3274), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3252, 3274), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3307, 3334), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3312, 3334), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3370, 3397), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3375, 3397), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3479, 3503), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3493, 3503), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n')] |
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import deprecated
from langchain_core.outputs import LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@deprecated("0.1.0", alternative="Use string instead.", removal="0.2.0")
def RunTypeEnum() -> Type[RunTypeEnumDep]:
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Base(BaseModel):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="0.2.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class BaseRun(BaseModel):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: Optional[Dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: Dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: List[str]
response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = None
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
# Begin V2 API Schemas
class Run(BaseRunV2):
"""Run schema for the V2 API in the Tracer."""
execution_order: int
child_execution_order: int
child_runs: List[Run] = Field(default_factory=list)
tags: Optional[List[str]] = Field(default_factory=list)
events: List[Dict[str, Any]] = Field(default_factory=list)
trace_id: Optional[UUID] = None
dotted_order: Optional[str] = None
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("events") is None:
values["events"] = []
return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
Run.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
| [
"langchain_core._api.deprecated",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (791, 817), False, 'from langchain_core._api import deprecated\n'), ((1060, 1096), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1070, 1096), False, 'from langchain_core._api import deprecated\n'), ((1194, 1230), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1204, 1230), False, 'from langchain_core._api import deprecated\n'), ((1325, 1361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1335, 1361), False, 'from langchain_core._api import deprecated\n'), ((1472, 1508), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1482, 1508), False, 'from langchain_core._api import deprecated\n'), ((1615, 1670), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (1625, 1670), False, 'from langchain_core._api import deprecated\n'), ((2131, 2186), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2141, 2186), False, 'from langchain_core._api import deprecated\n'), ((2306, 2361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2316, 2361), False, 'from langchain_core._api import deprecated\n'), ((2688, 2743), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2698, 2743), False, 'from langchain_core._api import deprecated\n'), ((586, 727), 'warnings.warn', 'warnings.warn', (['"""RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."""', 'DeprecationWarning'], {}), '(\n "RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."\n , DeprecationWarning)\n', (599, 727), False, 'import warnings\n'), ((935, 982), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (940, 982), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1816, 1863), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1821, 1863), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1898, 1945), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1903, 1945), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2525, 2552), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2530, 2552), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2592, 2619), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2597, 2619), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2657, 2684), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2662, 2684), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2902, 2929), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2907, 2929), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2969, 2996), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2974, 2996), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3034, 3061), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3039, 3061), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3247, 3274), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3252, 3274), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3307, 3334), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3312, 3334), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3370, 3397), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3375, 3397), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3479, 3503), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3493, 3503), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n')] |
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import deprecated
from langchain_core.outputs import LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@deprecated("0.1.0", alternative="Use string instead.", removal="0.2.0")
def RunTypeEnum() -> Type[RunTypeEnumDep]:
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Base(BaseModel):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="0.2.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="0.2.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class BaseRun(BaseModel):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: Optional[Dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: Dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: List[str]
response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = None
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="0.2.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
# Begin V2 API Schemas
class Run(BaseRunV2):
"""Run schema for the V2 API in the Tracer."""
execution_order: int
child_execution_order: int
child_runs: List[Run] = Field(default_factory=list)
tags: Optional[List[str]] = Field(default_factory=list)
events: List[Dict[str, Any]] = Field(default_factory=list)
trace_id: Optional[UUID] = None
dotted_order: Optional[str] = None
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("events") is None:
values["events"] = []
return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
Run.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
| [
"langchain_core._api.deprecated",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (791, 817), False, 'from langchain_core._api import deprecated\n'), ((1060, 1096), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1070, 1096), False, 'from langchain_core._api import deprecated\n'), ((1194, 1230), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1204, 1230), False, 'from langchain_core._api import deprecated\n'), ((1325, 1361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1335, 1361), False, 'from langchain_core._api import deprecated\n'), ((1472, 1508), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1482, 1508), False, 'from langchain_core._api import deprecated\n'), ((1615, 1670), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (1625, 1670), False, 'from langchain_core._api import deprecated\n'), ((2131, 2186), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2141, 2186), False, 'from langchain_core._api import deprecated\n'), ((2306, 2361), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2316, 2361), False, 'from langchain_core._api import deprecated\n'), ((2688, 2743), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Run"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Run', removal='0.2.0')\n", (2698, 2743), False, 'from langchain_core._api import deprecated\n'), ((586, 727), 'warnings.warn', 'warnings.warn', (['"""RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."""', 'DeprecationWarning'], {}), '(\n "RunTypeEnum is deprecated. Please directly use a string instead (e.g. \'llm\', \'chain\', \'tool\')."\n , DeprecationWarning)\n', (599, 727), False, 'import warnings\n'), ((935, 982), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (940, 982), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1816, 1863), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1821, 1863), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((1898, 1945), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'datetime.datetime.utcnow'}), '(default_factory=datetime.datetime.utcnow)\n', (1903, 1945), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2525, 2552), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2530, 2552), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2592, 2619), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2597, 2619), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2657, 2684), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2662, 2684), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2902, 2929), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2907, 2929), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((2969, 2996), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2974, 2996), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3034, 3061), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3039, 3061), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3247, 3274), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3252, 3274), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3307, 3334), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3312, 3334), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3370, 3397), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (3375, 3397), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n'), ((3479, 3503), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3493, 3503), False, 'from langchain_core.pydantic_v1 import BaseModel, Field, root_validator\n')] |
"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
template_format = config.get("template_format", "f-string")
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix.endswith((".yaml", ".yml")):
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
def _load_chat_prompt(config: Dict) -> ChatPromptTemplate:
"""Load chat prompt from config"""
messages = config.pop("messages")
template = messages[0]["prompt"].pop("template") if messages else None
config.pop("input_variables")
if not template:
raise ValueError("Can't load chat prompt without template")
return ChatPromptTemplate.from_template(template=template, **config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
"chat": _load_chat_prompt,
}
| [
"langchain_core.utils.try_load_from_hub",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.string.StrOutputParser",
"langchain_core.prompts.few_shot.FewShotPromptTemplate",
"langchain_core.prompts.prompt.PromptTemplate"
] | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import FewShotPromptTemplate\n'), ((4733, 4757), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {}), '(**config)\n', (4747, 4757), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((6137, 6198), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template'}), '(template=template, **config)\n', (6169, 6198), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((4921, 5007), 'langchain_core.utils.try_load_from_hub', 'try_load_from_hub', (['path', '_load_prompt_from_file', '"""prompts"""', "{'py', 'json', 'yaml'}"], {}), "(path, _load_prompt_from_file, 'prompts', {'py', 'json',\n 'yaml'})\n", (4938, 5007), False, 'from langchain_core.utils import try_load_from_hub\n'), ((5295, 5305), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (5299, 5305), False, 'from pathlib import Path\n'), ((2964, 2990), 'langchain_core.output_parsers.string.StrOutputParser', 'StrOutputParser', ([], {}), '(**_config)\n', (2979, 2990), False, 'from langchain_core.output_parsers.string import StrOutputParser\n'), ((5470, 5482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5479, 5482), False, 'import json\n'), ((5599, 5616), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import yaml\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2329, 2346), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2343, 2346), False, 'import yaml\n')] |
"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
template_format = config.get("template_format", "f-string")
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix.endswith((".yaml", ".yml")):
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
def _load_chat_prompt(config: Dict) -> ChatPromptTemplate:
"""Load chat prompt from config"""
messages = config.pop("messages")
template = messages[0]["prompt"].pop("template") if messages else None
config.pop("input_variables")
if not template:
raise ValueError("Can't load chat prompt without template")
return ChatPromptTemplate.from_template(template=template, **config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
"chat": _load_chat_prompt,
}
| [
"langchain_core.utils.try_load_from_hub",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.string.StrOutputParser",
"langchain_core.prompts.few_shot.FewShotPromptTemplate",
"langchain_core.prompts.prompt.PromptTemplate"
] | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import FewShotPromptTemplate\n'), ((4733, 4757), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {}), '(**config)\n', (4747, 4757), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((6137, 6198), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template'}), '(template=template, **config)\n', (6169, 6198), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((4921, 5007), 'langchain_core.utils.try_load_from_hub', 'try_load_from_hub', (['path', '_load_prompt_from_file', '"""prompts"""', "{'py', 'json', 'yaml'}"], {}), "(path, _load_prompt_from_file, 'prompts', {'py', 'json',\n 'yaml'})\n", (4938, 5007), False, 'from langchain_core.utils import try_load_from_hub\n'), ((5295, 5305), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (5299, 5305), False, 'from pathlib import Path\n'), ((2964, 2990), 'langchain_core.output_parsers.string.StrOutputParser', 'StrOutputParser', ([], {}), '(**_config)\n', (2979, 2990), False, 'from langchain_core.output_parsers.string import StrOutputParser\n'), ((5470, 5482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5479, 5482), False, 'import json\n'), ((5599, 5616), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import yaml\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2329, 2346), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2343, 2346), False, 'import yaml\n')] |
"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
template_format = config.get("template_format", "f-string")
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix.endswith((".yaml", ".yml")):
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
def _load_chat_prompt(config: Dict) -> ChatPromptTemplate:
"""Load chat prompt from config"""
messages = config.pop("messages")
template = messages[0]["prompt"].pop("template") if messages else None
config.pop("input_variables")
if not template:
raise ValueError("Can't load chat prompt without template")
return ChatPromptTemplate.from_template(template=template, **config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
"chat": _load_chat_prompt,
}
| [
"langchain_core.utils.try_load_from_hub",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.string.StrOutputParser",
"langchain_core.prompts.few_shot.FewShotPromptTemplate",
"langchain_core.prompts.prompt.PromptTemplate"
] | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import FewShotPromptTemplate\n'), ((4733, 4757), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {}), '(**config)\n', (4747, 4757), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((6137, 6198), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template'}), '(template=template, **config)\n', (6169, 6198), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((4921, 5007), 'langchain_core.utils.try_load_from_hub', 'try_load_from_hub', (['path', '_load_prompt_from_file', '"""prompts"""', "{'py', 'json', 'yaml'}"], {}), "(path, _load_prompt_from_file, 'prompts', {'py', 'json',\n 'yaml'})\n", (4938, 5007), False, 'from langchain_core.utils import try_load_from_hub\n'), ((5295, 5305), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (5299, 5305), False, 'from pathlib import Path\n'), ((2964, 2990), 'langchain_core.output_parsers.string.StrOutputParser', 'StrOutputParser', ([], {}), '(**_config)\n', (2979, 2990), False, 'from langchain_core.output_parsers.string import StrOutputParser\n'), ((5470, 5482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5479, 5482), False, 'import json\n'), ((5599, 5616), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import yaml\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2329, 2346), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2343, 2346), False, 'import yaml\n')] |
"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
template_format = config.get("template_format", "f-string")
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix.endswith((".yaml", ".yml")):
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
def _load_chat_prompt(config: Dict) -> ChatPromptTemplate:
"""Load chat prompt from config"""
messages = config.pop("messages")
template = messages[0]["prompt"].pop("template") if messages else None
config.pop("input_variables")
if not template:
raise ValueError("Can't load chat prompt without template")
return ChatPromptTemplate.from_template(template=template, **config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
"chat": _load_chat_prompt,
}
| [
"langchain_core.utils.try_load_from_hub",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.string.StrOutputParser",
"langchain_core.prompts.few_shot.FewShotPromptTemplate",
"langchain_core.prompts.prompt.PromptTemplate"
] | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import FewShotPromptTemplate\n'), ((4733, 4757), 'langchain_core.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {}), '(**config)\n', (4747, 4757), False, 'from langchain_core.prompts.prompt import PromptTemplate\n'), ((6137, 6198), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template'}), '(template=template, **config)\n', (6169, 6198), False, 'from langchain_core.prompts.chat import ChatPromptTemplate\n'), ((4921, 5007), 'langchain_core.utils.try_load_from_hub', 'try_load_from_hub', (['path', '_load_prompt_from_file', '"""prompts"""', "{'py', 'json', 'yaml'}"], {}), "(path, _load_prompt_from_file, 'prompts', {'py', 'json',\n 'yaml'})\n", (4938, 5007), False, 'from langchain_core.utils import try_load_from_hub\n'), ((5295, 5305), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (5299, 5305), False, 'from pathlib import Path\n'), ((2964, 2990), 'langchain_core.output_parsers.string.StrOutputParser', 'StrOutputParser', ([], {}), '(**_config)\n', (2979, 2990), False, 'from langchain_core.output_parsers.string import StrOutputParser\n'), ((5470, 5482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5479, 5482), False, 'import json\n'), ((5599, 5616), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import yaml\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2329, 2346), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2343, 2346), False, 'import yaml\n')] |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
from langchain.tools import Tool
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = retriever.get_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
Returns:
Tool class to pass to an agent
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
)
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.format_document",
"langchain.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')] |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
from langchain.tools import Tool
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = retriever.get_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
Returns:
Tool class to pass to an agent
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
)
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.format_document",
"langchain.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')] |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
from langchain.tools import Tool
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = retriever.get_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
Returns:
Tool class to pass to an agent
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
)
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.format_document",
"langchain.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')] |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
from langchain.tools import Tool
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = retriever.get_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
) -> str:
docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
return document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
Returns:
Tool class to pass to an agent
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
)
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.format_document",
"langchain.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')] |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description
@deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
class XMLAgent(BaseSingleActionAgent):
"""Agent that uses XML tags.
Args:
tools: list of tools the agent can choose from
llm_chain: The LLMChain to call to predict the next action
Examples:
.. code-block:: python
from langchain.agents import XMLAgent
from langchain
tools = ...
model =
"""
tools: List[BaseTool]
"""List of tools this agent has access to."""
llm_chain: LLMChain
"""Chain to use to predict action."""
@property
def input_keys(self) -> List[str]:
return ["input"]
@staticmethod
def get_default_prompt() -> ChatPromptTemplate:
base_prompt = ChatPromptTemplate.from_template(agent_instructions)
return base_prompt + AIMessagePromptTemplate.from_template(
"{intermediate_steps}"
)
@staticmethod
def get_default_output_parser() -> XMLAgentOutputParser:
return XMLAgentOutputParser()
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = self.llm_chain(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = await self.llm_chain.acall(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
def create_xml_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys
`tools`: contains descriptions for each tool.
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
}
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions for each tool.
* `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Previous Conversation:
{chat_history}
Question: {input}
{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| XMLAgentOutputParser()
)
return agent
| [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain.agents.format_scratchpad.format_xml",
"langchain_core._api.deprecated"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')] |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description
@deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
class XMLAgent(BaseSingleActionAgent):
"""Agent that uses XML tags.
Args:
tools: list of tools the agent can choose from
llm_chain: The LLMChain to call to predict the next action
Examples:
.. code-block:: python
from langchain.agents import XMLAgent
from langchain
tools = ...
model =
"""
tools: List[BaseTool]
"""List of tools this agent has access to."""
llm_chain: LLMChain
"""Chain to use to predict action."""
@property
def input_keys(self) -> List[str]:
return ["input"]
@staticmethod
def get_default_prompt() -> ChatPromptTemplate:
base_prompt = ChatPromptTemplate.from_template(agent_instructions)
return base_prompt + AIMessagePromptTemplate.from_template(
"{intermediate_steps}"
)
@staticmethod
def get_default_output_parser() -> XMLAgentOutputParser:
return XMLAgentOutputParser()
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = self.llm_chain(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = await self.llm_chain.acall(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
def create_xml_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys
`tools`: contains descriptions for each tool.
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
}
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions for each tool.
* `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Previous Conversation:
{chat_history}
Question: {input}
{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| XMLAgentOutputParser()
)
return agent
| [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain.agents.format_scratchpad.format_xml",
"langchain_core._api.deprecated"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')] |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description
@deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
class XMLAgent(BaseSingleActionAgent):
"""Agent that uses XML tags.
Args:
tools: list of tools the agent can choose from
llm_chain: The LLMChain to call to predict the next action
Examples:
.. code-block:: python
from langchain.agents import XMLAgent
from langchain
tools = ...
model =
"""
tools: List[BaseTool]
"""List of tools this agent has access to."""
llm_chain: LLMChain
"""Chain to use to predict action."""
@property
def input_keys(self) -> List[str]:
return ["input"]
@staticmethod
def get_default_prompt() -> ChatPromptTemplate:
base_prompt = ChatPromptTemplate.from_template(agent_instructions)
return base_prompt + AIMessagePromptTemplate.from_template(
"{intermediate_steps}"
)
@staticmethod
def get_default_output_parser() -> XMLAgentOutputParser:
return XMLAgentOutputParser()
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = self.llm_chain(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = await self.llm_chain.acall(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
def create_xml_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys
`tools`: contains descriptions for each tool.
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
}
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions for each tool.
* `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Previous Conversation:
{chat_history}
Question: {input}
{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| XMLAgentOutputParser()
)
return agent
| [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain.agents.format_scratchpad.format_xml",
"langchain_core._api.deprecated"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')] |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.format_scratchpad import format_xml
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description
@deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0")
class XMLAgent(BaseSingleActionAgent):
"""Agent that uses XML tags.
Args:
tools: list of tools the agent can choose from
llm_chain: The LLMChain to call to predict the next action
Examples:
.. code-block:: python
from langchain.agents import XMLAgent
from langchain
tools = ...
model =
"""
tools: List[BaseTool]
"""List of tools this agent has access to."""
llm_chain: LLMChain
"""Chain to use to predict action."""
@property
def input_keys(self) -> List[str]:
return ["input"]
@staticmethod
def get_default_prompt() -> ChatPromptTemplate:
base_prompt = ChatPromptTemplate.from_template(agent_instructions)
return base_prompt + AIMessagePromptTemplate.from_template(
"{intermediate_steps}"
)
@staticmethod
def get_default_output_parser() -> XMLAgentOutputParser:
return XMLAgentOutputParser()
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = self.llm_chain(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
tools = ""
for tool in self.tools:
tools += f"{tool.name}: {tool.description}\n"
inputs = {
"intermediate_steps": log,
"tools": tools,
"question": kwargs["input"],
"stop": ["</tool_input>", "</final_answer>"],
}
response = await self.llm_chain.acall(inputs, callbacks=callbacks)
return response[self.llm_chain.output_key]
def create_xml_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses XML to format its logic.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys
`tools`: contains descriptions for each tool.
`agent_scratchpad`: contains previous agent actions and tool outputs.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
}
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions for each tool.
* `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Previous Conversation:
{chat_history}
Question: {input}
{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
)
llm_with_stop = llm.bind(stop=["</tool_input>"])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| XMLAgentOutputParser()
)
return agent
| [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain.agents.format_scratchpad.format_xml",
"langchain_core._api.deprecated"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')] |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing graphs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.graphs import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(graphs, name)
__all__ = [
"MemgraphGraph",
"NetworkxEntityGraph",
"Neo4jGraph",
"NebulaGraph",
"NeptuneGraph",
"KuzuGraph",
"HugeGraph",
"RdfGraph",
"ArangoGraph",
"FalkorDBGraph",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')] |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing graphs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.graphs import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(graphs, name)
__all__ = [
"MemgraphGraph",
"NetworkxEntityGraph",
"Neo4jGraph",
"NebulaGraph",
"NeptuneGraph",
"KuzuGraph",
"HugeGraph",
"RdfGraph",
"ArangoGraph",
"FalkorDBGraph",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')] |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing graphs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.graphs import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(graphs, name)
__all__ = [
"MemgraphGraph",
"NetworkxEntityGraph",
"Neo4jGraph",
"NebulaGraph",
"NeptuneGraph",
"KuzuGraph",
"HugeGraph",
"RdfGraph",
"ArangoGraph",
"FalkorDBGraph",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')] |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing graphs from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.graphs import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(graphs, name)
__all__ = [
"MemgraphGraph",
"NetworkxEntityGraph",
"Neo4jGraph",
"NebulaGraph",
"NeptuneGraph",
"KuzuGraph",
"HugeGraph",
"RdfGraph",
"ArangoGraph",
"FalkorDBGraph",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')] |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url (str): The input URL.
Returns:
return a 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
*Security Note*: This API chain uses the requests toolkit
to make GET, POST, PATCH, PUT, and DELETE requests to an API.
Exercise care in who is allowed to use this chain. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can submit issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
api_request_chain: LLMChain
api_answer_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(exclude=True)
api_docs: str
question_key: str = "question" #: :meta private:
output_key: str = "output" #: :meta private:
limit_to_domains: Optional[Sequence[str]]
"""Use to limit the domains that can be accessed by the API chain.
* For example, to limit to just the domain `https://www.example.com`, set
`limit_to_domains=["https://www.example.com"]`.
* The default value is an empty tuple, which means that no domains are
allowed by default. By design this will raise an error on instantiation.
* Use a None if you want to allow all domains by default -- this is not
recommended for security reasons, as it would allow malicious users to
make requests to arbitrary URLS including internal APIs accessible from
the server.
"""
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@root_validator(pre=True)
def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_limit_to_domains(cls, values: Dict) -> Dict:
"""Check that allowed domains are valid."""
if "limit_to_domains" not in values:
raise ValueError(
"You must specify a list of domains to limit access using "
"`limit_to_domains`"
)
if not values["limit_to_domains"] and values["limit_to_domains"] is not None:
raise ValueError(
"Please provide a list of domains to limit access using "
"`limit_to_domains`."
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
_run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = self.api_answer_chain.predict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = await self.api_request_chain.apredict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
await _run_manager.on_text(
api_url, color="green", end="\n", verbose=self.verbose
)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = await self.requests_wrapper.aget(api_url)
await _run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
limit_to_domains: Optional[Sequence[str]] = tuple(),
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_docs,
limit_to_domains=limit_to_domains,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "api_chain"
| [
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')] |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url (str): The input URL.
Returns:
return a 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
*Security Note*: This API chain uses the requests toolkit
to make GET, POST, PATCH, PUT, and DELETE requests to an API.
Exercise care in who is allowed to use this chain. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can submit issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
api_request_chain: LLMChain
api_answer_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(exclude=True)
api_docs: str
question_key: str = "question" #: :meta private:
output_key: str = "output" #: :meta private:
limit_to_domains: Optional[Sequence[str]]
"""Use to limit the domains that can be accessed by the API chain.
* For example, to limit to just the domain `https://www.example.com`, set
`limit_to_domains=["https://www.example.com"]`.
* The default value is an empty tuple, which means that no domains are
allowed by default. By design this will raise an error on instantiation.
* Use a None if you want to allow all domains by default -- this is not
recommended for security reasons, as it would allow malicious users to
make requests to arbitrary URLS including internal APIs accessible from
the server.
"""
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@root_validator(pre=True)
def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_limit_to_domains(cls, values: Dict) -> Dict:
"""Check that allowed domains are valid."""
if "limit_to_domains" not in values:
raise ValueError(
"You must specify a list of domains to limit access using "
"`limit_to_domains`"
)
if not values["limit_to_domains"] and values["limit_to_domains"] is not None:
raise ValueError(
"Please provide a list of domains to limit access using "
"`limit_to_domains`."
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
_run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = self.api_answer_chain.predict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = await self.api_request_chain.apredict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
await _run_manager.on_text(
api_url, color="green", end="\n", verbose=self.verbose
)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = await self.requests_wrapper.aget(api_url)
await _run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
limit_to_domains: Optional[Sequence[str]] = tuple(),
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_docs,
limit_to_domains=limit_to_domains,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "api_chain"
| [
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')] |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url (str): The input URL.
Returns:
return a 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
*Security Note*: This API chain uses the requests toolkit
to make GET, POST, PATCH, PUT, and DELETE requests to an API.
Exercise care in who is allowed to use this chain. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can submit issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
api_request_chain: LLMChain
api_answer_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(exclude=True)
api_docs: str
question_key: str = "question" #: :meta private:
output_key: str = "output" #: :meta private:
limit_to_domains: Optional[Sequence[str]]
"""Use to limit the domains that can be accessed by the API chain.
* For example, to limit to just the domain `https://www.example.com`, set
`limit_to_domains=["https://www.example.com"]`.
* The default value is an empty tuple, which means that no domains are
allowed by default. By design this will raise an error on instantiation.
* Use a None if you want to allow all domains by default -- this is not
recommended for security reasons, as it would allow malicious users to
make requests to arbitrary URLS including internal APIs accessible from
the server.
"""
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@root_validator(pre=True)
def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_limit_to_domains(cls, values: Dict) -> Dict:
"""Check that allowed domains are valid."""
if "limit_to_domains" not in values:
raise ValueError(
"You must specify a list of domains to limit access using "
"`limit_to_domains`"
)
if not values["limit_to_domains"] and values["limit_to_domains"] is not None:
raise ValueError(
"Please provide a list of domains to limit access using "
"`limit_to_domains`."
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
_run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = self.api_answer_chain.predict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = await self.api_request_chain.apredict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
await _run_manager.on_text(
api_url, color="green", end="\n", verbose=self.verbose
)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = await self.requests_wrapper.aget(api_url)
await _run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
limit_to_domains: Optional[Sequence[str]] = tuple(),
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_docs,
limit_to_domains=limit_to_domains,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "api_chain"
| [
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')] |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url (str): The input URL.
Returns:
return a 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
*Security Note*: This API chain uses the requests toolkit
to make GET, POST, PATCH, PUT, and DELETE requests to an API.
Exercise care in who is allowed to use this chain. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can submit issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
api_request_chain: LLMChain
api_answer_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(exclude=True)
api_docs: str
question_key: str = "question" #: :meta private:
output_key: str = "output" #: :meta private:
limit_to_domains: Optional[Sequence[str]]
"""Use to limit the domains that can be accessed by the API chain.
* For example, to limit to just the domain `https://www.example.com`, set
`limit_to_domains=["https://www.example.com"]`.
* The default value is an empty tuple, which means that no domains are
allowed by default. By design this will raise an error on instantiation.
* Use a None if you want to allow all domains by default -- this is not
recommended for security reasons, as it would allow malicious users to
make requests to arbitrary URLS including internal APIs accessible from
the server.
"""
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@root_validator(pre=True)
def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_limit_to_domains(cls, values: Dict) -> Dict:
"""Check that allowed domains are valid."""
if "limit_to_domains" not in values:
raise ValueError(
"You must specify a list of domains to limit access using "
"`limit_to_domains`"
)
if not values["limit_to_domains"] and values["limit_to_domains"] is not None:
raise ValueError(
"Please provide a list of domains to limit access using "
"`limit_to_domains`."
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
_run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = self.api_answer_chain.predict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = await self.api_request_chain.apredict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
await _run_manager.on_text(
api_url, color="green", end="\n", verbose=self.verbose
)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(
api_url, self.limit_to_domains
):
raise ValueError(
f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
)
api_response = await self.requests_wrapper.aget(api_url)
await _run_manager.on_text(
str(api_response), color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
limit_to_domains: Optional[Sequence[str]] = tuple(),
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_docs,
limit_to_domains=limit_to_domains,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "api_chain"
| [
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')] |
"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
BaseChatPromptTemplate,
BaseMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Literal["f-string", "jinja2"] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high level structure of produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from langchain_core.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain_core.prompts import SemanticSimilarityExampleSelector
from langchain_core.embeddings import OpenAIEmbeddings
from langchain_core.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain_core import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?")) # noqa: T201
# Use within an LLM
from langchain_core.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.prompts.string.get_template_variables"
] | [((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (3482, 3484), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((10187, 10214), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10192, 10214), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((11875, 11902), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (11892, 11902), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((4012, 4103), 'langchain_core.prompts.string.get_template_variables', 'get_template_variables', (["(values['prefix'] + values['suffix'])", "values['template_format']"], {}), "(values['prefix'] + values['suffix'], values[\n 'template_format'])\n", (4034, 4103), False, 'from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, get_template_variables\n')] |
"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
BaseChatPromptTemplate,
BaseMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Literal["f-string", "jinja2"] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high level structure of produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from langchain_core.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain_core.prompts import SemanticSimilarityExampleSelector
from langchain_core.embeddings import OpenAIEmbeddings
from langchain_core.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain_core import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?")) # noqa: T201
# Use within an LLM
from langchain_core.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.prompts.string.get_template_variables"
] | [((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (3482, 3484), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((10187, 10214), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10192, 10214), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((11875, 11902), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (11892, 11902), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((4012, 4103), 'langchain_core.prompts.string.get_template_variables', 'get_template_variables', (["(values['prefix'] + values['suffix'])", "values['template_format']"], {}), "(values['prefix'] + values['suffix'], values[\n 'template_format'])\n", (4034, 4103), False, 'from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, get_template_variables\n')] |
"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
BaseChatPromptTemplate,
BaseMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Literal["f-string", "jinja2"] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high level structure of produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from langchain_core.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain_core.prompts import SemanticSimilarityExampleSelector
from langchain_core.embeddings import OpenAIEmbeddings
from langchain_core.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain_core import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?")) # noqa: T201
# Use within an LLM
from langchain_core.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.prompts.string.get_template_variables"
] | [((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (3482, 3484), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((10187, 10214), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10192, 10214), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((11875, 11902), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (11892, 11902), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((4012, 4103), 'langchain_core.prompts.string.get_template_variables', 'get_template_variables', (["(values['prefix'] + values['suffix'])", "values['template_format']"], {}), "(values['prefix'] + values['suffix'], values[\n 'template_format'])\n", (4034, 4103), False, 'from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, get_template_variables\n')] |
"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
BaseChatPromptTemplate,
BaseMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Literal["f-string", "jinja2"] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high level structure of produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from langchain_core.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain_core.prompts import SemanticSimilarityExampleSelector
from langchain_core.embeddings import OpenAIEmbeddings
from langchain_core.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain_core import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?")) # noqa: T201
# Use within an LLM
from langchain_core.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
def pretty_repr(self, html: bool = False) -> str:
raise NotImplementedError()
| [
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.prompts.string.get_template_variables"
] | [((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (3482, 3484), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((10187, 10214), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10192, 10214), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((11875, 11902), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (11892, 11902), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((4012, 4103), 'langchain_core.prompts.string.get_template_variables', 'get_template_variables', (["(values['prefix'] + values['suffix'])", "values['template_format']"], {}), "(values['prefix'] + values['suffix'], values[\n 'template_format'])\n", (4034, 4103), False, 'from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, get_template_variables\n')] |
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
return self.llm_chain.output_keys
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child())
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
| [
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')] |