Delete Dockerfile
Dockerfile +0 -24
Dockerfile
DELETED
@@ -1,24 +0,0 @@
-FROM debian:bookworm-slim
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    git build-essential cmake curl ca-certificates pkg-config \
-    libcurl4-openssl-dev && rm -rf /var/lib/apt/lists/*
-
-WORKDIR /app
-
-RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp.git \
-    && mkdir -p build && cd build \
-    && cmake -DCMAKE_BUILD_TYPE=Release \
-        -DGGML_NATIVE=ON \
-        -DLLAMA_BUILD_EXAMPLES=ON \
-        -DLLAMA_BUILD_SERVER=ON \
-        -DLLAMA_BUILD_TESTS=OFF \
-        ../llama.cpp \
-    && cmake --build . --target llama-server -j
-
-RUN mkdir -p /models && \
-    curl -fL -o /models/model.gguf \
-    "https://huggingface.co/LiquidAI/LFM2-350M-ENJP-MT-GGUF/resolve/main/LFM2-350M-ENJP-MT-Q4_K_M.gguf?download=true"
-
-EXPOSE 7860
-CMD ["bash","-lc","/app/build/bin/llama-server -m /models/model.gguf --host 0.0.0.0 --port ${PORT:-7860} -c 2048 -ngl 0"]
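The deleted Dockerfile built llama-server from source, downloaded the LFM2-350M-ENJP-MT Q4_K_M GGUF model, and served it on port 7860. As a minimal sketch of how such a container could have been exercised once running, assuming it is reachable at localhost:7860 and using llama-server's OpenAI-compatible chat completions endpoint (the translation prompt below is illustrative only):

curl -s http://localhost:7860/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages":[{"role":"user","content":"Translate to Japanese: Hello, world."}],"max_tokens":128}'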