8000 Fix completion request · Adrien2112/llama-cpp-python@6c7cec0 · GitHub
[go: up one dir, main page]

Skip to content

Commit 6c7cec0

Browse files
committed
Fix completion request
1 parent 6153baa commit 6c7cec0

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

llama_cpp/server/__main__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ class CreateCompletionRequest(BaseModel):
7676
temperature: float = 0.8
7777
top_p: float = 0.95
7878
echo: bool = False
79-
stop: List[str] = []
79+
stop: Optional[List[str]] = []
8080
stream: bool = False
8181

8282
# ignored or currently unsupported
@@ -173,7 +173,7 @@ class CreateChatCompletionRequest(BaseModel):
173173
temperature: float = 0.8
174174
top_p: float = 0.95
175175
stream: bool = False
176-
stop: List[str] = []
176+
stop: Optional[List[str]] = []
177177
max_tokens: int = 128
178178

179179
# ignored or currently unsupported

0 commit comments

Comments (0)