Formatting · XFree/llama-cpp-python@4f5f99e · GitHub

Commit 4f5f99e

Formatting

1 parent 0daf16d · commit 4f5f99e

1 file changed: +7 -3 lines changed

llama_cpp/server/__main__.py

Lines changed: 7 additions & 3 deletions
@@ -109,7 +109,9 @@ class Config:
     "/v1/completions",
     response_model=CreateCompletionResponse,
 )
-def create_completion(request: CreateCompletionRequest, llama: llama_cpp.Llama=Depends(get_llama)):
+def create_completion(
+    request: CreateCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama)
+):
     if isinstance(request.prompt, list):
         request.prompt = "".join(request.prompt)

@@ -153,7 +155,9 @@ class Config:
     "/v1/embeddings",
     response_model=CreateEmbeddingResponse,
 )
-def create_embedding(request: CreateEmbeddingRequest, llama: llama_cpp.Llama=Depends(get_llama)):
+def create_embedding(
+    request: CreateEmbeddingRequest, llama: llama_cpp.Llama = Depends(get_llama)
+):
     return llama.create_embedding(**request.dict(exclude={"model", "user"}))

@@ -207,7 +211,7 @@ class Config:
 )
 def create_chat_completion(
     request: CreateChatCompletionRequest,
-    llama: llama_cpp.Llama=Depends(get_llama),
+    llama: llama_cpp.Llama = Depends(get_llama),
 ) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]:
     completion_or_chunks = llama.create_chat_completion(
         **request.dict(
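
The commit message says only "Formatting", and the new layout matches what the Black formatter produces for a def line that exceeds its default 88-character limit: the parameter list is wrapped one level deep, the closing "):" moves to its own line, and the annotated defaults gain spaces around "=", as PEP 8 recommends for defaults on annotated parameters. Assuming Black is the tool in use (the commit does not say), the change can be reproduced with its public Python API; black.format_str and black.Mode are real entry points, while the snippet below is an illustrative reconstruction, not the project's code:

# Hedged sketch: check that Black's default mode yields the committed layout.
# Assumption: the 'black' package is installed (pip install black); the
# commit itself does not state which formatter was used.
import black

# The pre-commit form of the first signature, as a standalone snippet.
src = (
    "def create_completion(request: CreateCompletionRequest, "
    "llama: llama_cpp.Llama=Depends(get_llama)):\n"
    "    pass\n"
)

# format_str() reformats a source string; Mode() applies the defaults
# (88-column lines), under which this roughly 100-character def is split.
print(black.format_str(src, mode=black.Mode()))

This prints the same shape as the diff above: the arguments on one indented line, with "):" alone on the next.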

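
All three reformatted handlers obtain the model through the same FastAPI dependency-injection parameter, llama: llama_cpp.Llama = Depends(get_llama). Below is a minimal, self-contained sketch of that pattern; the placeholder MODEL_PATH and the bare module-level global are illustrative assumptions, since the real server configures its shared Llama instance elsewhere in the file (the hunk context above mentions a Config class):

# Minimal sketch of the Depends(get_llama) pattern seen in the diff.
# Assumptions: fastapi and llama-cpp-python are installed; MODEL_PATH is a
# placeholder, and the actual server wires this up via its settings,
# not a bare module-level global.
from fastapi import Depends, FastAPI

import llama_cpp

MODEL_PATH = "./models/ggml-model.bin"  # placeholder path, not from the commit

app = FastAPI()
llama = llama_cpp.Llama(model_path=MODEL_PATH)  # loaded once, then shared


def get_llama() -> llama_cpp.Llama:
    # FastAPI calls this provider for each request and injects its return
    # value into any parameter declared as Depends(get_llama).
    return llama


@app.post("/v1/embeddings")
def create_embedding(request: dict, llama: llama_cpp.Llama = Depends(get_llama)):
    # Same shape as the handler in the diff, with the Pydantic request
    # model replaced by a plain dict for brevity.
    return llama.create_embedding(input=request["input"])

The point of the pattern is that every route shares one loaded model instead of re-reading it per request, and tests can swap get_llama for a stub via app.dependency_overrides in a single place.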