# LITELLM PROXY DEPENDENCIES #
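# Example install (standard pip usage; pin or hash further as your deployment requires):
#   pip install -r requirements.txt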
gradio # web ui
open-interpreter # natural-language code interpreter
anyio==4.2.0 # openai + http req.
openai>=1.0.0 # openai req.
fastapi # server dep
pydantic>=2.5 # openai req.
backoff==2.2.1 # server dep
pyyaml==6.0 # server dep
uvicorn==0.22.0 # server dep
gunicorn==21.2.0 # server dep
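# Sketch of a production launch with the servers pinned above (assumes the
# proxy's FastAPI app is importable as litellm.proxy.proxy_server:app; adjust
# the module path to your deployment):
#   gunicorn litellm.proxy.proxy_server:app --workers 4 \
#     --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000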
boto3==1.28.58 # aws bedrock/sagemaker calls
redis==4.6.0 # caching
prisma==0.11.0 # for db
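# Note: prisma-client-py generates its client after install; run
#   prisma generate
# against your schema before the db code will work.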
mangum==0.17.0 # for aws lambda functions
google-generativeai==0.1.0 # for google palm/gemini calls
async_generator==1.10.0 # for async ollama calls
traceloop-sdk==0.5.3 # for open telemetry logging
langfuse>=2.0.0 # for langfuse self-hosted logging
orjson==3.9.7 # fast json serialization for /embedding responses

### LITELLM PACKAGE DEPENDENCIES
python-dotenv>=0.2.0 # for env
tiktoken>=0.4.0 # for calculating usage
importlib-metadata>=6.8.0 # for random utils
tokenizers==0.14.0 # for calculating usage
click==8.1.7 # for proxy cli
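# Example proxy start via this cli (model name is illustrative):
#   litellm --model gpt-3.5-turbo --port 8000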
jinja2==3.1.2 # for prompt templates
certifi>=2023.7.22 # [TODO] clean up
aiohttp==3.9.0 # for network calls
groq # groq api sdk
####