english : use `typos` to fix comments and logs (#4354) · Pints-AI/llama.cpp@9494d7c · GitHub

Commit 9494d7c

english : use `typos` to fix comments and logs (ggml-org#4354)
1 parent 6138963 commit 9494d7c

17 files changed, +34 −34 lines changed

common/log.h (4 additions & 4 deletions)

```diff
@@ -61,13 +61,13 @@
 // #define LOG_TARGET stderr
 // #include "log.h"
 //
-// The log target can also be redirected to a diffrent function
+// The log target can also be redirected to a different function
 // like so:
 //
-// #define LOG_TARGET log_handler_diffrent()
+// #define LOG_TARGET log_handler_different()
 // #include "log.h"
 //
-// FILE* log_handler_diffrent()
+// FILE* log_handler_different()
 // {
 //  return stderr;
 // }
@@ -421,7 +421,7 @@ inline FILE *log_handler2_impl(bool change = false, LogTriState append = LogTriS
 
 // Disables logs entirely at runtime.
 // Makes LOG() and LOG_TEE() produce no output,
-// untill enabled back.
+// until enabled back.
 #define log_disable() log_disable_impl()
 
 // INTERNAL, DO NOT USE
```
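
The comment block fixed above doubles as usage documentation for common/log.h: defining LOG_TARGET before including the header redirects where LOG() writes. Below is a minimal sketch of that pattern, built only from what the fixed comments state; the handler name is the comment's own example, and the file is assumed to be compiled with llama.cpp's common/ directory on the include path.

```cpp
// Sketch, not repository code: redirect the log target as the fixed comment describes.
#define LOG_TARGET log_handler_different()
#include "log.h" // llama.cpp's common/log.h

// Per the comment, any function returning a FILE* can serve as the target.
FILE * log_handler_different() {
    return stderr;
}

int main() {
    LOG("this goes to the redirected target (stderr here)\n");
    log_disable(); // per the second hunk: LOG()/LOG_TEE() produce no output until enabled back
    LOG("this line is suppressed\n");
    return 0;
}
```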

convert.py (2 additions & 2 deletions)

```diff
@@ -585,7 +585,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:
 
     if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
         # Transformers models put different tensors in different files, but
-        # don't split indivdual tensors between files.
+        # don't split individual tensors between files.
         model: LazyModel = {}
         for mp in models_plus:
             model.update(mp.model)
@@ -678,7 +678,7 @@ def rebuild_from_type_v2(func, new_type, args, state):
         return func(*args)
 
     CLASSES: dict[tuple[str, str], Any] = {
-        # getattr used here as a workaround for mypy not being smart enough to detrmine
+        # getattr used here as a workaround for mypy not being smart enough to determine
        # the staticmethods have a __func__ attribute.
        ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
        ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'),
```

examples/llava/clip.cpp (1 addition & 1 deletion)

```diff
@@ -739,7 +739,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
         temp->ny = longer_side;
         temp->size = 3 * longer_side * longer_side;
         temp->data = new uint8_t[temp->size]();
-        uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA
+        uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA
 
         // fill with background color
         for (size_t i = 0; i < temp->size; i++) {
```
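
For readers skimming the hunk: clip.cpp is padding the image to a square canvas here, allocating 3 * longer_side * longer_side bytes and filling them with the LLaVA background color before the real pixels are copied in. The following is a self-contained sketch of just that fill step; the 336-pixel side length is an illustrative assumption, and the subsequent copy of the source image is omitted.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA

    const size_t longer_side = 336;        // assumed side length, for illustration only
    std::vector<uint8_t> data(3 * longer_side * longer_side);

    // fill with background color: a packed RGB buffer just repeats the 3-byte pattern
    for (size_t i = 0; i < data.size(); i++) {
        data[i] = bc[i % 3];
    }

    printf("first pixel: R=%u G=%u B=%u\n", data[0], data[1], data[2]);
    return 0;
}
```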

examples/llava/convert-image-encoder-to-gguf.py (1 addition & 1 deletion)

```diff
@@ -51,7 +51,7 @@ def bytes_to_unicode():
     The reversible bpe codes work on unicode strings.
     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-    This is a signficant percentage of your normal, say, 32K bpe vocab.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
     And avoids mapping to whitespace/control characters the bpe code barfs on.
     """
```

examples/lookahead/README.md (1 addition & 1 deletion)

```diff
@@ -1,6 +1,6 @@
 # llama.cpp/examples/lookahead
 
-Demonstartion of lookahead decoding technique:
+Demonstration of lookahead decoding technique:
 
 https://lmsys.org/blog/2023-11-21-lookahead-decoding/
 
```

examples/server/json.hpp (1 addition & 1 deletion)

```diff
@@ -11227,7 +11227,7 @@ class binary_reader
                 }
                 if (is_ndarray) // ndarray dimensional vector can only contain integers, and can not embed another array
                 {
-                    return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimentional vector is not allowed", "size"), nullptr));
+                    return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimensional vector is not allowed", "size"), nullptr));
                 }
                 std::vector<size_t> dim;
                 if (JSON_HEDLEY_UNLIKELY(!get_ubjson_ndarray_size(dim)))
```

examples/server/public/completion.js (1 addition & 1 deletion)

```diff
@@ -114,7 +114,7 @@ export async function* llama(prompt, params = {}, config = {}) {
   return content;
 }
 
-// Call llama, return an event target that you can subcribe to
+// Call llama, return an event target that you can subscribe to
 //
 // Example:
 //
```

examples/server/public/index.html (3 additions & 3 deletions)

```diff
@@ -238,7 +238,7 @@
         cache_prompt: true
       })
 
-      /* START: Support for storing prompt templates and parameters in borwser LocalStorage */
+      /* START: Support for storing prompt templates and parameters in browser LocalStorage */
 
       const local_storage_storageKey = "llamacpp_server_local_storage";
 
@@ -282,7 +282,7 @@
       let importedTemplates = local_storage_getDataAsObject('user_templates')
 
       if (importedTemplates) {
-        // saved templates were successfuly imported.
+        // saved templates were successfully imported.
 
         console.log('Processing saved templates and updating default template')
         params.value = { ...params.value, image_data: [] };
@@ -303,7 +303,7 @@
       }
 
       function userTemplateResetToDefault() {
-        console.log('Reseting themplate to default')
+        console.log('Resetting template to default')
         selectedUserTemplate.value.name = 'default';
         selectedUserTemplate.value.data = savedUserTemplates.value['default'];
       }
```

examples/speculative/README.md (1 addition & 1 deletion)

```diff
@@ -1,6 +1,6 @@
 # llama.cpp/examples/speculative
 
-Demonstartion of speculative decoding and tree-based speculative decoding techniques
+Demonstration of speculative decoding and tree-based speculative decoding techniques
 
 More info:
 
```

examples/speculative/speculative.cpp (1 addition & 1 deletion)

```diff
@@ -428,7 +428,7 @@ int main(int argc, char ** argv) {
             ++n_past_tgt;
         }
 
-        // the first token is always proposed by the traget model before the speculation loop so we erase it here
+        // the first token is always proposed by the target model before the speculation loop so we erase it here
        for (int s = 0; s < n_seq_dft; ++s) {
            if (!drafts[s].active) {
                continue;
```
