docs: Fix README indentation · datxuantran/llama-cpp-python@4142824 · GitHub
[go: up one dir, main page]

Skip to content

Commit 4142824

Browse files
committed
docs: Fix README indentation
1 parent 1539146 commit 4142824

File tree

1 file changed

+47
-47
lines changed

1 file changed

+47
-47
lines changed

README.md

Lines changed: 47 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -116,11 +116,11 @@ Below is a short example demonstrating how to use the high-level API for basic
116116
>>> from llama_cpp import Llama
117117
>>> llm = Llama(model_path="./models/7B/llama-model.gguf")
118118
>>> output = llm(
119-
"Q: Name the planets in the solar system? A: ", # Prompt
120-
max_tokens=32, # Generate up to 32 tokens
121-
stop=["Q:", "\n"], # Stop generating just before the model would generate a new question
122-
echo=True # Echo the prompt back in the output
123-
)
119+
"Q: Name the planets in the solar system? A: ", # Prompt
120+
max_tokens=32, # Generate up to 32 tokens
121+
stop=["Q:", "\n"], # Stop generating just before the model would generate a new question
122+
echo=True # Echo the prompt back in the output
123+
) # Generate a completion, can also call create_completion
124124
>>> print(output)
125125
{
126126
"id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
@@ -153,13 +153,13 @@ Note that `chat_format` option must be set for the particular model you are usin
153153
>>> from llama_cpp import Llama
154154
>>> llm = Llama(model_path="path/to/llama-2/llama-model.gguf", chat_format="llama-2")
155155
>>> llm.create_chat_completion(
156-
messages = [
157-
{"role": "system", "content": "You are an assistant who perfectly describes images."},
158-
{
159-
"role": "user",
160-
"content": "Describe this image in detail please."
161-
}
162-
]
156+
messages = [
157+
{"role": "system", "content": "You are an assistant who perfectly describes images."},
158+
{
159+
"role": "user",
160+
"content": "Describe this image in detail please."
161+
}
162+
]
163163
)
164164
```
165165

@@ -175,43 +175,43 @@ The gguf-converted files for this model can be found here: [functionary-7b-v1](h
175175
>>> from llama_cpp import Llama
176176
>>> llm = Llama(model_path="path/to/functionary/llama-model.gguf", chat_format="functionary")
177177
>>> llm.create_chat_completion(
178-
messages = [
179-
{
180-
"role": "system",
181-
"content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary"
182-
},
183-
{
184-
"role": "user",
185-
"content": "Extract Jason is 25 years old"
186-
}
187-
],
188-
tools=[{
189-
"type": "function",
190-
"function": {
191-
"name": "UserDetail",
192-
"parameters": {
193-
"type": "object"
194-
"title": "UserDetail",
195-
"properties": {
196-
"name": {
197-
"title": "Name",
198-
"type": "string"
178+
messages = [
179+
{
180+
"role": "system",
181+
"content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary"
182+
},
183+
{
184+
"role": "user",
185+
"content": "Extract Jason is 25 years old"
186+
}
187+
],
188+
tools=[{
189+
"type": "function",
190+
"function": {
191+
"name": "UserDetail",
192+
"parameters": {
193+
"type": "object"
194+
"title": "UserDetail",
195+
"properties": {
196+
"name": {
197+
"title": "Name",
198+
"type": "string"
199+
},
200+
"age": {
201+
"title": "Age",
202+
"type": "integer"
203+
}
199204
},
200-
"age": {
201-
"title": "Age",
202-
"type": "integer"
203-
}
204-
},
205-
"required": [ "name", "age" ]
205+
"required": [ "name", "age" ]
206+
}
207+
}
208+
}],
209+
tool_choices=[{
210+
"type": "function",
211+
"function": {
212+
"name": "UserDetail"
206213
}
207-
}
208-
}],
209-
tool_choices=[{
210-
"type": "function",
211-
"function": {
212-
"name": "UserDetail"
213-
}
214-
}]
214+
}]
215215
)
216216
```
217217

0 commit comments

Comments
 (0)
0