8000
We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 684d7c8 commit 8895b90 — Copy full SHA for 8895b90
llama_cpp/server/app.py
@@ -167,8 +167,9 @@ def get_llama():
167
)
168
169
class CreateCompletionRequest(BaseModel):
170
- prompt: Optional[str] = Field(
171
- default="", description="The prompt to generate completions for."
+ prompt: Union[str, List[str]] = Field(
+ default="",
172
+ description="The prompt to generate completions for."
173
174
suffix: Optional[str] = Field(
175
default=None,
@@ -222,6 +223,9 @@ class Config:
222
223
def create_completion(
224
request: CreateCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama)
225
):
226
+ if isinstance(request.prompt, list):
227
+ request.prompt = "".join(request.prompt)
228
+
229
completion_or_chunks = llama(
230
**request.dict(
231
exclude={
0 commit comments