fastapi==0.110
# CPU-only wheels for llama-cpp-python
--extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
llama-cpp-python==0.2.90
requests
gradio==4.43.0