Merge branch 'main' of github.com:abetlen/llama-cpp-python into better-server-params-and-fields · Stonelinks/llama-cpp-python@3008a95

Commit 3008a95

committed
Merge branch 'main' of github.com:abetlen/llama-cpp-python into better-server-params-and-fields
2 parents b9098b0 + a02aa12 · commit 3008a95

File tree

11 files changed: +268 −16 lines changed

.dockerignore

Lines changed: 166 additions & 0 deletions
@@ -0,0 +1,166 @@
+_skbuild/
+
+.envrc
+
+models/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/

.github/workflows/build-docker.yaml

Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
+name: Build Docker
+
+on: workflow_dispatch
+
+permissions:
+  contents: write
+  packages: write
+
+jobs:
+  docker:
+    name: Build and push Docker image
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          submodules: "true"
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: true # push to registry
+          pull: true # always fetch the latest base images
+          platforms: linux/amd64,linux/arm64 # build for both amd64 and arm64
+          tags: ghcr.io/abetlen/llama-cpp-python:latest
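Because the workflow above runs only on `workflow_dispatch`, it never fires on push; it must be started by hand from the Actions tab or programmatically. A minimal sketch using GitHub's REST `workflow_dispatch` endpoint — the token, the `main` ref, and the `requests` dependency are assumptions, not part of this commit:

```python
# Hypothetical trigger for the manual-only "Build Docker" workflow via
# GitHub's REST API. Assumes a token with workflow permissions is exported
# as GITHUB_TOKEN and that `requests` is installed.
import os
import requests

resp = requests.post(
    "https://api.github.com/repos/abetlen/llama-cpp-python"
    "/actions/workflows/build-docker.yaml/dispatches",
    headers={
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        "Accept": "application/vnd.github+json",
    },
    json={"ref": "main"},  # branch or tag to run the workflow against
)
resp.raise_for_status()  # GitHub answers 204 No Content on success
```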

.github/workflows/publish.yaml

Lines changed: 1 addition & 1 deletion
@@ -28,4 +28,4 @@ jobs:
         # if: startsWith(github.ref, 'refs/tags')
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+          password: ${{ secrets.PYPI_API_TOKEN }}

(A whitespace-only fix to the `password:` line; the visible text is unchanged.)

Dockerfile

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+FROM python:3-bullseye
+
+# We need to set the host to 0.0.0.0 to allow outside access
+ENV HOST 0.0.0.0
+
+COPY . .
+
+# Install the package
+RUN apt update && apt install -y libopenblas-dev
+RUN python -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette
+
+RUN LLAMA_OPENBLAS=1 python3 setup.py develop
+
+# Run the server
+CMD python3 -m llama_cpp.server

Dockerfile.cuda

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+FROM nvidia/cuda:12.1.1-devel-ubuntu20.04
+
+# We need to set the host to 0.0.0.0 to allow outside access
+ENV HOST 0.0.0.0
+
+COPY . .
+
+# Install the package
+RUN apt update && apt install -y python3 python3-pip
+RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette
+
+RUN LLAMA_CUBLAS=1 python3 setup.py develop
+
+# Run the server
+CMD python3 -m llama_cpp.server
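The workflow above builds the default `Dockerfile`; this CUDA variant is not wired into CI, so it has to be built locally. A sketch using the Docker SDK for Python (`pip install docker` and the local tag are assumptions, not part of this commit):

```python
# Illustrative sketch: build the CUDA image from Dockerfile.cuda with the
# Docker SDK for Python. Running the resulting container with GPUs would
# additionally need a device request (e.g. `--gpus all` on the CLI).
import docker

client = docker.from_env()
image, build_logs = client.images.build(
    path=".",                     # repo root as the build context
    dockerfile="Dockerfile.cuda",
    tag="llama-cpp-python:cuda",  # hypothetical local tag
)
for entry in build_logs:          # stream the build output
    if "stream" in entry:
        print(entry["stream"], end="")
```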

README.md

Lines changed: 8 additions & 0 deletions
@@ -72,6 +72,14 @@ python3 -m llama_cpp.server

 Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation.

+## Docker image
+
+A Docker image is available on [GHCR](https://ghcr.io/abetlen/llama-cpp-python). To run the server:
+
+```bash
+docker run --rm -it -p8000:8000 -v /path/to/models:/models -eMODEL=/models/ggml-model-name.bin ghcr.io/abetlen/llama-cpp-python:latest
+```
+
 ## Low-level API

 The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`.
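Once the container from the README snippet above is serving on port 8000, the API can be exercised directly. A minimal sketch — the prompt is illustrative and the OpenAI-style `/v1/completions` route is an assumption; the OpenAPI docs at `/docs` are the authoritative schema:

```python
# Query the running llama_cpp.server container; assumes it is listening on
# localhost:8000 as in the `docker run` line above.
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={
        "prompt": "Q: Name the planets in the solar system. A: ",
        "max_tokens": 64,
        "stop": ["Q:", "\n"],
    },
)
resp.raise_for_status()
print(resp.json()["choices"][0]["text"])
```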

llama_cpp/llama.py

Lines changed: 15 additions & 7 deletions
@@ -53,12 +53,14 @@ class LlamaState:
     def __init__(
         self,
         eval_tokens: Deque[llama_cpp.llama_token],
-        eval_logits: Deque[List[float]],
+        eval_logits: Deque[List[llama_cpp.c_float]],
         llama_state,
+        llama_state_size: llama_cpp.c_size_t,
     ):
         self.eval_tokens = eval_tokens
         self.eval_logits = eval_logits
         self.llama_state = llama_state
+        self.llama_state_size = llama_state_size


 class Llama:

@@ -394,7 +396,7 @@ def generate(
             and tuple(self.eval_tokens) == tuple(tokens[: len(self.eval_tokens)])
         ):
             if self.verbose:
-                print("generate cache hit", file=sys.stderr)
+                print("Llama.generate: cache hit", file=sys.stderr)
             reset = False
             tokens = tokens[len(self.eval_tokens) :]

@@ -516,7 +518,7 @@ def _create_completion(

         if self.cache and prompt_tokens in self.cache:
             if self.verbose:
-                print("cache hit", file=sys.stderr)
+                print("Llama._create_completion: cache hit", file=sys.stderr)
             self.load_state(self.cache[prompt_tokens])

         finish_reason = "length"

@@ -536,7 +538,7 @@ def _create_completion(
             if self.cache and len(completion_tokens) == 0:
                 if prompt_tokens not in self.cache:
                     if self.verbose:
-                        print("cache miss", file=sys.stderr)
+                        print("Llama._create_completion: cache miss", file=sys.stderr)
                     self.cache[prompt_tokens] = self.save_state()

             completion_tokens.append(token)
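For context on the `self.cache` lookups these renamed log lines trace: the cache maps prompt tokens to saved states, so a repeated prompt can restore state instead of re-evaluating. A hedged usage sketch — the model path is a placeholder, and `LlamaCache`/`set_cache` are assumed to be the cache API at this version:

```python
# Sketch: enable the completion cache so a second call with the same prompt
# hits the "cache hit" branch above instead of re-evaluating the prompt.
from llama_cpp import Llama, LlamaCache

llm = Llama(model_path="./models/ggml-model.bin", verbose=True)  # placeholder path
llm.set_cache(LlamaCache())

llm("Q: Name the planets. A:", max_tokens=16)  # cache miss: state is saved
llm("Q: Name the planets. A:", max_tokens=16)  # cache hit: state is restored
```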
@@ -950,19 +952,25 @@ def save_state(self) -> LlamaState:
         assert self.ctx is not None
         state_size = llama_cpp.llama_get_state_size(self.ctx)
         llama_state = (llama_cpp.c_uint8 * int(state_size))()
-        if llama_cpp.llama_copy_state_data(self.ctx, llama_state) != state_size:
+        n_bytes = llama_cpp.llama_copy_state_data(self.ctx, llama_state)
+        if int(n_bytes) > int(state_size):
             raise RuntimeError("Failed to copy llama state data")
+        llama_state_compact = (llama_cpp.c_uint8 * int(n_bytes))()
+        llama_cpp.ctypes.memmove(llama_state_compact, llama_state, int(n_bytes))
+        if self.verbose:
+            print(f"Llama.save_state: saving {n_bytes} bytes of llama state", file=sys.stderr)
         return LlamaState(
             eval_tokens=self.eval_tokens.copy(),
             eval_logits=self.eval_logits.copy(),
-            llama_state=llama_state,
+            llama_state=llama_state_compact,
+            llama_state_size=n_bytes,
         )

     def load_state(self, state: LlamaState) -> None:
         assert self.ctx is not None
         self.eval_tokens = state.eval_tokens.copy()
         self.eval_logits = state.eval_logits.copy()
-        state_size = llama_cpp.llama_get_state_size(self.ctx)
+        state_size = state.llama_state_size
         if llama_cpp.llama_set_state_data(self.ctx, state.llama_state) != state_size:
             raise RuntimeError("Failed to set llama state data")
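The net effect of the hunk above is that a saved state now occupies only the bytes llama.cpp actually wrote, not the `llama_get_state_size` upper bound, and `load_state` checks against that recorded size. A round-trip sketch of the public API this diff changes (the model path is a placeholder):

```python
# Save and restore the compacted state; llama_state_size records how many
# bytes were kept, which load_state now uses instead of the upper bound.
from llama_cpp import Llama

llm = Llama(model_path="./models/ggml-model.bin")  # placeholder path
llm("Q: Name the planets. A:", max_tokens=1)       # populate some state
state = llm.save_state()                           # compacted copy
print(f"state occupies {state.llama_state_size} bytes")
llm.load_state(state)                              # restore exactly those bytes
```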

llama_cpp/llama_cpp.py

Lines changed: 6 additions & 5 deletions
@@ -71,7 +71,7 @@ def _load_shared_library(lib_base_name):
 LLAMA_FILE_MAGIC = b"ggjt"
 LLAMA_FILE_MAGIC_UNVERSIONED = b"ggml"
 LLAMA_SESSION_MAGIC = b"ggsn"
-LLAMA_SESSION_VERSION = ctypes.c_int(0)
+LLAMA_SESSION_VERSION = ctypes.c_int(1)

 llama_context_p = c_void_p

@@ -136,9 +136,9 @@ class llama_context_params(Structure):
 )  # tok_embeddings.weight and output.weight are F16
 LLAMA_FTYPE_MOSTLY_Q4_2 = ctypes.c_int(5)  # except 1d tensors
 # LLAMA_FTYPE_MOSTYL_Q4_3 = ctypes.c_int(6)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q8_0 = ctypes.c_int(7)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q5_0 = ctypes.c_int(8)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q5_1 = ctypes.c_int(9)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q8_0 = ctypes.c_int(7)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_0 = ctypes.c_int(8)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_1 = ctypes.c_int(9)  # except 1d tensors

 # Functions

@@ -239,7 +239,8 @@ def llama_set_rng_seed(ctx: llama_context_p, seed: c_int):
 _lib.llama_set_rng_seed.restype = None


-# Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
+# Returns the maximum size in bytes of the state (rng, logits, embedding
+# and kv_cache) - will often be smaller after compacting tokens
 def llama_get_state_size(ctx: llama_context_p) -> c_size_t:
     return _lib.llama_get_state_size(ctx)
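The reworded comment matters for callers: `llama_get_state_size` is an upper bound, and `llama_copy_state_data` returns the bytes actually written. A raw-ctypes sketch of the allocate-then-shrink pattern, mirroring what `Llama.save_state` above does (the model path is a placeholder):

```python
# Allocate at the upper bound, copy, then keep only the used prefix.
import ctypes

import llama_cpp
from llama_cpp import Llama

llm = Llama(model_path="./models/ggml-model.bin")        # placeholder path
max_size = llama_cpp.llama_get_state_size(llm.ctx)       # upper bound in bytes
buf = (llama_cpp.c_uint8 * int(max_size))()
n_bytes = llama_cpp.llama_copy_state_data(llm.ctx, buf)  # bytes actually used
compact = (llama_cpp.c_uint8 * int(n_bytes))()
ctypes.memmove(compact, buf, int(n_bytes))               # drop the unused tail
```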

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llama_cpp_python"
-version = "0.1.40"
+version = "0.1.41"
 description = "Python bindings for the llama.cpp library"
 authors = ["Andrei Betlen <abetlen@gmail.com>"]
 license = "MIT"

setup.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
     description="A Python wrapper for llama.cpp",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    version="0.1.40",
+    version="0.1.41",
     author="Andrei Betlen",
     author_email="abetlen@gmail.com",
     license="MIT",

vendor/llama.cpp (submodule pointer updated)

0 commit comments