chore: updated docker file to test the latest code · limcheekin/llama-cpp-python@5f9f46e · GitHub
[go: up one dir, main page]

Skip to content

Commit 5f9f46e

Browse files
committed
chore: updated docker file to test the latest code
1 parent d4719f9 commit 5f9f46e

File tree

2 files changed

+4
-3
lines changed

2 files changed

+4
-3
lines changed

Dockerfile.aws

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ RUN apt-get update && \
1919
libopenblas-dev \
2020
ninja-build \
2121
build-essential \
22+
pkg-config \
2223
curl
2324

2425
# Include global args in this stage of the build
@@ -34,13 +35,13 @@ COPY ./ ${FUNCTION_DIR}
3435

3536
RUN python${RUNTIME_VERSION} -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette
3637
# REF: https://github.com/abetlen/llama-cpp-python/blob/main/Dockerfile
37-
RUN cd ${FUNCTION_DIR} && CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 python${RUNTIME_VERSION} setup.py develop
38+
RUN cd ${FUNCTION_DIR} && CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 python${RUNTIME_VERSION} setup.py develop
3839
# Install Lambda Runtime Interface Client for Python
3940
RUN python${RUNTIME_VERSION} -m pip install awslambdaric --target ${FUNCTION_DIR}
4041

4142
# download the model file
4243
RUN mkdir ${FUNCTION_DIR}/model
43-
RUN curl -L https://huggingface.co/TheBloke/orca_mini_v2_7B-GGML/resolve/main/orca-mini-v2_7b.ggmlv3.q4_0.bin -o ${FUNCTION_DIR}/model/ggml-q4_0.bin
44+
RUN curl -L https://huggingface.co/TheBloke/orca_mini_v3_7B-GGML/resolve/main/orca_mini_v3_7b.ggmlv3.q4_0.bin -o ${FUNCTION_DIR}/model/ggml-q4_0.bin
4445

4546
# Stage 3 - final runtime image
4647
# Grab a fresh copy of the Python image

vendor/llama.cpp

0 commit comments

Comments (0)