# syntax=docker/dockerfile:1

###############################################################################
# Build stage: compiles llama-server and downloads the model. Everything here
# (compilers, git, headers, source tree) is discarded from the final image.
###############################################################################
FROM debian:bookworm-slim AS build

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        libcurl4-openssl-dev \
        pkg-config \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Pin the llama.cpp revision for reproducible builds; override at build time:
#   docker build --build-arg LLAMA_CPP_REF=b4458 .
ARG LLAMA_CPP_REF=master
RUN git clone --depth 1 --branch "${LLAMA_CPP_REF}" \
        https://github.com/ggerganov/llama.cpp.git

# GGML_NATIVE=OFF: native tuning binds the binary to the build host's CPU and
# can SIGILL on other machines — a shipped image must target the generic
# baseline. BUILD_SHARED_LIBS=OFF gives a self-contained llama-server binary
# that is trivial to copy into the runtime stage.
RUN cmake -S llama.cpp -B build \
        -DCMAKE_BUILD_TYPE=Release \
        -DGGML_NATIVE=OFF \
        -DBUILD_SHARED_LIBS=OFF \
        -DLLAMA_BUILD_EXAMPLES=ON \
        -DLLAMA_BUILD_SERVER=ON \
        -DLLAMA_BUILD_TESTS=OFF \
    && cmake --build build --target llama-server -j"$(nproc)"

# Fetch the model in the build stage so download tooling stays out of runtime.
# -f: fail on HTTP errors instead of saving an error page as the "model".
RUN mkdir -p /models \
    && curl -fL -o /models/model.gguf \
        "https://huggingface.co/LiquidAI/LFM2-350M-ENJP-MT-GGUF/resolve/main/LFM2-350M-ENJP-MT-Q4_K_M.gguf?download=true"

###############################################################################
# Runtime stage: minimal base + only the runtime shared libs the server needs.
###############################################################################
FROM debian:bookworm-slim

# curl is kept for the HEALTHCHECK probe; libcurl4/libgomp1/libstdc++6 are the
# dynamic libraries llama-server links against.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        libcurl4 \
        libgomp1 \
        libstdc++6 \
    && rm -rf /var/lib/apt/lists/* \
    # Stable numeric UID so orchestrators can verify runAsNonRoot.
    && useradd --system --uid 10001 --create-home --home-dir /app app

COPY --from=build --chown=app:app /app/build/bin/llama-server /app/llama-server
COPY --from=build --chown=app:app /models /models

USER app
WORKDIR /app

# Documentation only — publish with `docker run -p`.
EXPOSE 7860

# llama-server exposes /health; generous start period covers model load time.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
    CMD curl -fsS "http://localhost:${PORT:-7860}/health" || exit 1

# A shell is required for ${PORT:-7860} expansion; `exec` replaces the shell
# so llama-server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["/bin/sh", "-c", "exec /app/llama-server -m /models/model.gguf --host 0.0.0.0 --port \"${PORT:-7860}\" -c 2048 -ngl 0"]