docs: Add chat examples to openapi ui · zeroAucrux/llama-cpp-python@f7decc9 · GitHub
[go: up one dir, main page]

Skip to content

Commit f7decc9

Browse files
committed
docs: Add chat examples to openapi ui
1 parent 60d8498 commit f7decc9

File tree

1 file changed

+59
-9
lines changed

1 file changed

+59
-9
lines changed

llama_cpp/server/app.py

Lines changed: 59 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,7 @@
1212
import anyio
1313
from anyio.streams.memory import MemoryObjectSendStream
1414
from starlette.concurrency import run_in_threadpool, iterate_in_threadpool
15-
from fastapi import (
16-
Depends,
17-
FastAPI,
18-
APIRouter,
19-
Request,
20-
HTTPException,
21-
status,
22-
)
15+
from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body
2316
from fastapi.middleware import Middleware
2417
from fastapi.middleware.cors import CORSMiddleware
2518
from fastapi.security import HTTPBearer
@@ -356,7 +349,64 @@ async def create_embedding(
356349
)
357350
async def create_chat_completion(
358351
request: Request,
359-
body: CreateChatCompletionRequest,
352+
body: CreateChatCompletionRequest = Body(
353+
openapi_examples={
354+
"normal": {
355+
"summary": "Chat Completion",
356+
"value": {
357+
"model": "gpt-3.5-turbo",
358+
"messages": [
359+
{"role": "system", "content": "You are a helpful assistant."},
360+
{"role": "user", "content": "What is the capital of France?"},
361+
],
362+
},
363+
},
364+
"json_mode": {
365+
"summary": "JSON Mode",
366+
"value": {
367+
"model": "gpt-3.5-turbo",
368+
"messages": [
369+
{"role": "system", "content": "You are a helpful assistant."},
370+
{"role": "user", "content": "Who won the world series in 2020"},
371+
],
372+
"response_format": { "type": "json_object" }
373+
},
374+
},
375+
"tool_calling": {
376+
"summary": "Tool Calling",
377+
"value": {
378+
"model": "gpt-3.5-turbo",
379+
"messages": [
380+
{"role": "system", "content": "You are a helpful assistant."},
381+
{"role": "user", "content": "Extract Jason is 30 years old."},
382+
],
383+
"tools": [
384+
{
385+
"type": "function",
386+
"function": {
387+
"name": "User",
388+
"description": "User record",
389+
"parameters": {
390+
"type": "object",
391+
"properties": {
392+
"name": {"type": "string"},
393+
"age": {"type": "number"},
394+
},
395+
"required": ["name", "age"],
396+
},
397+
}
398+
}
399+
],
400+
"tool_choice": {
401+
"type": "function",
402+
"function": {
403+
"name": "User",
404+
}
405+
}
406+
},
407+
},
408+
}
409+
),
360410
llama_proxy: LlamaProxy = Depends(get_llama_proxy),
361411
) -> llama_cpp.ChatCompletion:
362412
exclude = {

0 commit comments

Comments (0)