From f1d6e3a6f068798491cf2ed33dd4a39cb03f5710 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Kopci=C5=84ski?=
Date: Fri, 24 Jan 2025 10:12:10 +0100
Subject: [PATCH] fix typo tokenizer -> tokenizerSource

---
 docs/docs/fundamentals/loading-models.md | 2 +-
 docs/docs/llms/running-llms.md           | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/docs/fundamentals/loading-models.md b/docs/docs/fundamentals/loading-models.md
index e5381880ca..5c56d7912e 100644
--- a/docs/docs/fundamentals/loading-models.md
+++ b/docs/docs/fundamentals/loading-models.md
@@ -40,6 +40,6 @@ import { useLLM } from 'react-native-executorch';
 
 const llama = useLLM({
   modelSource: 'https://.../llama3_2.pte',
-  tokenizer: require('../assets/tokenizer.bin'),
+  tokenizerSource: require('../assets/tokenizer.bin'),
 });
 ```
diff --git a/docs/docs/llms/running-llms.md b/docs/docs/llms/running-llms.md
index 93a3e4e7db..a00bad237b 100644
--- a/docs/docs/llms/running-llms.md
+++ b/docs/docs/llms/running-llms.md
@@ -18,7 +18,7 @@ import { useLLM, LLAMA3_2_1B } from 'react-native-executorch';
 
 const llama = useLLM({
   modelSource: LLAMA3_2_1B,
-  tokenizer: require('../assets/tokenizer.bin'),
+  tokenizerSource: require('../assets/tokenizer.bin'),
   contextWindowLength: 3,
 });
 ```
@@ -37,7 +37,7 @@ Given computational constraints, our architecture is designed to support only on
 
 **`modelSource`** - A string that specifies the location of the model binary. For more information, take a look at [loading models](../fundamentals/loading-models.md) section.
 
-**`tokenizer`** - URL to the binary file which contains the tokenizer
+**`tokenizerSource`** - URL to the binary file which contains the tokenizer
 
 **`contextWindowLength`** - The number of messages from the current conversation that the model will use to generate a response. The higher the number, the more context the model will have. Keep in mind that using larger context windows will result in longer inference time and higher memory usage.
 
@@ -62,7 +62,7 @@ In order to send a message to the model, one can use the following code:
 
 ```typescript
 const llama = useLLM(
   modelSource: LLAMA3_2_1B,
-  tokenizer: require('../assets/tokenizer.bin'),
+  tokenizerSource: require('../assets/tokenizer.bin'),
 );
 ...
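
For reference, here is a minimal sketch of the corrected usage this patch documents. It is not part of the patch: `useChat` is a hypothetical wrapper, the asset path is a placeholder, and the `generate`/`response` members on the hook result are assumed from the docs quoted above rather than confirmed against the library.

```typescript
// Sketch only: the corrected config key is `tokenizerSource`, not `tokenizer`.
// Assumes `useLLM` and `LLAMA3_2_1B` as shown in the patched docs; `generate`
// and `response` are inferred from the surrounding docs, not verified API.
import { useLLM, LLAMA3_2_1B } from 'react-native-executorch';

// Hypothetical wrapper hook, for illustration only.
export function useChat() {
  const llama = useLLM({
    modelSource: LLAMA3_2_1B,
    tokenizerSource: require('../assets/tokenizer.bin'), // was: `tokenizer`
    contextWindowLength: 3, // last 3 messages are used as context
  });

  // Send a message and read back the model's response.
  const send = async (message: string) => {
    await llama.generate(message);
    return llama.response;
  };

  return { llama, send };
}
```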