Spaces:
Sleeping
Sleeping
| # LLM_MODEL_TYPE=openai | |
| # LLM_MODEL_TYPE=gpt4all-j | |
| # LLM_MODEL_TYPE=gpt4all | |
| # LLM_MODEL_TYPE=llamacpp | |
| # LLM_MODEL_TYPE=huggingface | |
| # LLM_MODEL_TYPE=mosaicml | |
| # LLM_MODEL_TYPE=stablelm | |
| # LLM_MODEL_TYPE=openllm | |
| LLM_MODEL_TYPE=hftgi | |
| OPENLLM_SERVER_URL= | |
| HFTGI_SERVER_URL=https://enabled-factually-cougar.ngrok-free.app | |
| OPENAI_API_KEY= | |
| # if unset, default to "gpt-3.5-turbo" | |
| OPENAI_MODEL_NAME= | |
| # cpu, mps or cuda:0 - if unset, use whatever is detected | |
| HF_EMBEDDINGS_DEVICE_TYPE= | |
| HF_PIPELINE_DEVICE_TYPE= | |
| # uncomment one of the below to load corresponding quantized model | |
| # LOAD_QUANTIZED_MODEL=4bit | |
| # LOAD_QUANTIZED_MODEL=8bit | |
| DISABLE_MODEL_PRELOADING=true | |
| CHAT_HISTORY_ENABLED=true | |
| SHOW_PARAM_SETTINGS=false | |
| SHARE_GRADIO_APP=false | |
| PDF_FILE_BASE_URL=https://chat-with-llama-2.netlify.app/pdfs/pci_dss_v4/ | |
| # if unset, default to "hkunlp/instructor-xl" | |
| HF_EMBEDDINGS_MODEL_NAME="hkunlp/instructor-large" | |
| # number of cpu cores - used to set n_threads for GPT4ALL & LlamaCpp models | |
| NUMBER_OF_CPU_CORES= | |
| HUGGINGFACE_AUTH_TOKEN= | |
| USING_TORCH_BFLOAT16=true | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-3b" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-7b" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-12b" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/wizardLM-7B-HF" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/vicuna-7B-1.1-HF" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-j" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-falcon" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="lmsys/fastchat-t5-3b-v1.0" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-7b-chat-hf" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-13b-chat-hf" | |
| # HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-70b-chat-hf" | |
| HUGGINGFACE_MODEL_NAME_OR_PATH="microsoft/Orca-2-7b" | |
| STABLELM_MODEL_NAME_OR_PATH="OpenAssistant/stablelm-7b-sft-v7-epoch-3" | |
| MOSAICML_MODEL_NAME_OR_PATH="mosaicml/mpt-7b-instruct" | |
| FALCON_MODEL_NAME_OR_PATH="tiiuae/falcon-7b-instruct" | |
| GPT4ALL_J_MODEL_PATH="./models/ggml-gpt4all-j-v1.3-groovy.bin" | |
| GPT4ALL_J_DOWNLOAD_LINK=https://gpt4all.io/models/ggml-gpt4all-j-v1.3-groovy.bin | |
| GPT4ALL_MODEL_PATH="./models/ggml-nous-gpt4-vicuna-13b.bin" | |
| GPT4ALL_DOWNLOAD_LINK=https://gpt4all.io/models/ggml-nous-gpt4-vicuna-13b.bin | |
| LLAMACPP_MODEL_PATH="./models/wizardLM-7B.ggmlv3.q4_1.bin" | |
| LLAMACPP_DOWNLOAD_LINK=https://huggingface.co/TheBloke/wizardLM-7B-GGML/resolve/main/wizardLM-7B.ggmlv3.q4_1.bin | |
| # Index for PCI DSS v4 PDF files - chunk_size=1024 chunk_overlap=512 | |
| # CHROMADB_INDEX_PATH="./data/chromadb_1024_512/" | |
| FAISS_INDEX_PATH="./data/faiss_1024_512/" | |
| CHAT_QUESTION="What's the capital city of Malaysia?" | |
| QA_QUESTION="What's PCI DSS?" | |
| # QUESTIONS_FILE_PATH="./data/questions.txt" | |
| QUESTIONS_FILE_PATH="./data/questions_with_faq.txt" | |
| TOKENIZERS_PARALLELISM=true | |
| # env variables for ingesting source PDF files | |
| SOURCE_PDFS_PATH="./data/pdfs/" | |
| SOURCE_URLS="./data/pci_dss_urls.txt" | |
| CHUNK_SIZE=1024 | |
| # CHUNCK_SIZE=1024 — NOTE(review): original misspelled name kept in case the ingestion code reads "CHUNCK_SIZE"; verify and remove | |
| CHUNK_OVERLAP=512 | |
| VERTOPAL_APP_ID= | |
| VERTOPAL_TOKEN= | |