diff --git a/docs/docs/benchmarks/_category_.json b/docs/docs/benchmarks/_category_.json
index 001b34959d..e790334696 100644
--- a/docs/docs/benchmarks/_category_.json
+++ b/docs/docs/benchmarks/_category_.json
@@ -1,6 +1,6 @@
{
"label": "Benchmarks",
- "position": 7,
+ "position": 8,
"link": {
"type": "generated-index"
}
diff --git a/docs/docs/benchmarks/memory-usage.md b/docs/docs/benchmarks/memory-usage.md
index 868a0884b6..c105b6d286 100644
--- a/docs/docs/benchmarks/memory-usage.md
+++ b/docs/docs/benchmarks/memory-usage.md
@@ -34,3 +34,10 @@ sidebar_position: 2
| LLAMA3_2_3B | 7.1 | 7.3 |
| LLAMA3_2_3B_SPINQUANT | 3.7 | 3.8 |
| LLAMA3_2_3B_QLORA | 4 | 4.1 |
+
+## Speech to text
+
+| Model | Android (XNNPACK) [MB] | iOS (XNNPACK) [MB] |
+| -------------- | ---------------------- | ------------------ |
+| WHISPER_TINY | 900 | 600 |
+| MOONSHINE_TINY | 650 | 560 |
diff --git a/docs/docs/benchmarks/model-size.md b/docs/docs/benchmarks/model-size.md
index a80f59d47f..78fc8ccaa4 100644
--- a/docs/docs/benchmarks/model-size.md
+++ b/docs/docs/benchmarks/model-size.md
@@ -34,3 +34,10 @@ sidebar_position: 1
| LLAMA3_2_3B | 6.43 |
| LLAMA3_2_3B_SPINQUANT | 2.55 |
| LLAMA3_2_3B_QLORA | 2.65 |
+
+## Speech to text
+
+| Model | XNNPACK [MB] |
+| -------------- | ------------ |
+| WHISPER_TINY | 231.0 |
+| MOONSHINE_TINY | 148.9 |
diff --git a/docs/docs/computer-vision/_category_.json b/docs/docs/computer-vision/_category_.json
index 5aa6c0263c..1a78d5e75f 100644
--- a/docs/docs/computer-vision/_category_.json
+++ b/docs/docs/computer-vision/_category_.json
@@ -1,6 +1,6 @@
{
"label": "Computer Vision",
- "position": 3,
+ "position": 4,
"link": {
"type": "generated-index"
}
diff --git a/docs/docs/hookless-api/ClassificationModule.md b/docs/docs/hookless-api/ClassificationModule.md
index 732971db27..2e62cbd4ab 100644
--- a/docs/docs/hookless-api/ClassificationModule.md
+++ b/docs/docs/hookless-api/ClassificationModule.md
@@ -3,7 +3,7 @@ title: ClassificationModule
sidebar_position: 1
---
-Hookless implementation of the [useClassification](../computer-vision/useClassification.mdx) hook.
+Hookless implementation of the [useClassification](../computer-vision/useClassification.md) hook.
## Reference
diff --git a/docs/docs/hookless-api/LLMModule.md b/docs/docs/hookless-api/LLMModule.md
index d52e2e0376..037b151bff 100644
--- a/docs/docs/hookless-api/LLMModule.md
+++ b/docs/docs/hookless-api/LLMModule.md
@@ -3,7 +3,7 @@ title: LLMModule
sidebar_position: 3
---
-Hookless implementation of the [useLLM](../llms/running-llms.md) hook.
+Hookless implementation of the [useLLM](../llms/useLLM.md) hook.
## Reference
diff --git a/docs/docs/hookless-api/ObjectDetectionModule.md b/docs/docs/hookless-api/ObjectDetectionModule.md
index 2cc3504ef4..6c730b7fe0 100644
--- a/docs/docs/hookless-api/ObjectDetectionModule.md
+++ b/docs/docs/hookless-api/ObjectDetectionModule.md
@@ -3,7 +3,7 @@ title: ObjectDetectionModule
sidebar_position: 5
---
-Hookless implementation of the [useObjectDetection](../computer-vision/useObjectDetection.mdx) hook.
+Hookless implementation of the [useObjectDetection](../computer-vision/useObjectDetection.md) hook.
## Reference
diff --git a/docs/docs/hookless-api/SpeechToTextModule.md b/docs/docs/hookless-api/SpeechToTextModule.md
new file mode 100644
index 0000000000..2438c8431d
--- /dev/null
+++ b/docs/docs/hookless-api/SpeechToTextModule.md
@@ -0,0 +1,55 @@
+---
+title: SpeechToTextModule
+sidebar_position: 6
+---
+
+Hookless implementation of the [useSpeechToText](../speech-to-text/useSpeechToText.md) hook.
+
+## Reference
+
+```typescript
+import { SpeechToTextModule } from 'react-native-executorch';
+
+const audioUrl = 'https://www.your-url.com/cool-audio.mp3';
+
+// Loading the model
+const onSequenceUpdate = (sequence) => {
+ console.log(sequence);
+};
+await SpeechToTextModule.load('moonshine', onSequenceUpdate);
+
+// Loading the audio and running the model
+await SpeechToTextModule.loadAudio(audioUrl);
+const transcribedText = await SpeechToTextModule.transcribe();
+```
+
+### Methods
+
+| Method | Type | Description |
+| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `load`       | `(modelName: 'whisper' \| 'moonshine', transcribeCallback?: (sequence: string) => void, modelDownloadProgressCallback?: (downloadProgress: number) => void, encoderSource?: ResourceSource, decoderSource?: ResourceSource, tokenizerSource?: ResourceSource) => Promise<void>` | Loads the model specified with `modelName`, where `encoderSource`, `decoderSource`, and `tokenizerSource` specify the location of the binaries for the model. `modelDownloadProgressCallback` allows you to monitor the current progress of the model download, while `transcribeCallback` is invoked with each generated token. |
+| `transcribe` | `(waveform?: number[]) => Promise<string>` | Starts a transcription process for a given input array, which should be a waveform at 16 kHz. When no input is provided, it uses the internal state set by calling `loadAudio`. Resolves a promise with the output transcription when the model finishes. |
+| `loadAudio`  | `(url: string) => void` | Loads an audio file from the given URL and sets an internal state that serves as the input to `transcribe()`. |
+| `encode`     | `(waveform: number[]) => Promise` | Runs the encoding part of the model. Returns a float array representing the output of the encoder. |
+| `decode`     | `(tokens: number[], encodings: number[]) => Promise` | Runs the decoder of the model. Returns a single token representing the next token in the output sequence. |
+
+
+### Type definitions
+
+```typescript
+type ResourceSource = string | number;
+```
+
+## Loading the model
+
+To load the model, use the `load` method. The required argument is `modelName`, which serves as an identifier for which model to use. It also accepts optional arguments such as `encoderSource`, `decoderSource`, and `tokenizerSource`, which specify the location of the binaries for the model. For more information, take a look at the [loading models](../fundamentals/loading-models.md) page. This method returns a promise, which can resolve to an error or void.
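+
+For instance, a minimal sketch of loading the model with the optional arguments passed explicitly (the URLs below are placeholders, not real model sources):
+
+```typescript
+import { SpeechToTextModule } from 'react-native-executorch';
+
+// Invoked with each generated token
+const onSequenceUpdate = (sequence: string) => {
+  console.log(sequence);
+};
+
+// Invoked with the current model download progress
+const onDownloadProgress = (progress: number) => {
+  console.log(`Download progress: ${progress}`);
+};
+
+await SpeechToTextModule.load(
+  'whisper',
+  onSequenceUpdate,
+  onDownloadProgress,
+  'https://www.your-url.com/whisper-encoder.pte', // encoderSource (placeholder)
+  'https://www.your-url.com/whisper-decoder.pte', // decoderSource (placeholder)
+  'https://www.your-url.com/whisper-tokenizer.json' // tokenizerSource (placeholder)
+);
+```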
+
+## Running the model
+
+To run the model, you can use the `transcribe` method. It accepts one argument, which is an array of numbers representing a waveform at a 16 kHz sampling rate. The method returns a promise, which can resolve either to an error or to a string containing the output text.
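+
+For instance, assuming you already have a 16 kHz waveform from another source (`decodeAudioToWaveform` below is a hypothetical helper, not part of this library), you could run:
+
+```typescript
+import { SpeechToTextModule } from 'react-native-executorch';
+
+// Assume this returns a number[] sampled at 16 kHz,
+// e.g. produced by a separate audio-decoding library.
+const waveform: number[] = await decodeAudioToWaveform('path/to/audio.mp3');
+
+try {
+  const transcription = await SpeechToTextModule.transcribe(waveform);
+  console.log(transcription);
+} catch (error) {
+  console.error(error);
+}
+```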
+
+## Obtaining the input
+
+To get the input, you can use the `loadAudio` method, which sets the internal input state of the model. Then you can call `transcribe` without passing any arguments. It is also possible to pass input obtained from other sources, as long as it is a float array containing the aforementioned 16 kHz waveform.
diff --git a/docs/docs/hookless-api/StyleTransferModule.md b/docs/docs/hookless-api/StyleTransferModule.md
index f084d8cad5..29c750bee3 100644
--- a/docs/docs/hookless-api/StyleTransferModule.md
+++ b/docs/docs/hookless-api/StyleTransferModule.md
@@ -3,7 +3,7 @@ title: StyleTransferModule
sidebar_position: 4
---
-Hookless implementation of the [useStyleTransfer](../computer-vision/useStyleTransfer.mdx) hook.
+Hookless implementation of the [useStyleTransfer](../computer-vision/useStyleTransfer.md) hook.
## Reference
diff --git a/docs/docs/hookless-api/_category_.json b/docs/docs/hookless-api/_category_.json
index e96f518638..6c0a89084f 100644
--- a/docs/docs/hookless-api/_category_.json
+++ b/docs/docs/hookless-api/_category_.json
@@ -1,6 +1,6 @@
{
"label": "Hookless API",
- "position": 4,
+ "position": 5,
"link": {
"type": "generated-index"
}
diff --git a/docs/docs/module-api/_category_.json b/docs/docs/module-api/_category_.json
index b04000182d..8cc82679ce 100644
--- a/docs/docs/module-api/_category_.json
+++ b/docs/docs/module-api/_category_.json
@@ -1,6 +1,6 @@
{
"label": "Module API",
- "position": 5,
+ "position": 6,
"link": {
"type": "generated-index"
}
diff --git a/docs/docs/speech-to-text/_category_.json b/docs/docs/speech-to-text/_category_.json
new file mode 100644
index 0000000000..554e3476a1
--- /dev/null
+++ b/docs/docs/speech-to-text/_category_.json
@@ -0,0 +1,7 @@
+{
+ "label": "Speech To Text",
+ "position": 3,
+ "link": {
+ "type": "generated-index"
+ }
+}
diff --git a/docs/docs/speech-to-text/useSpeechToText.md b/docs/docs/speech-to-text/useSpeechToText.md
new file mode 100644
index 0000000000..6cde2e04cf
--- /dev/null
+++ b/docs/docs/speech-to-text/useSpeechToText.md
@@ -0,0 +1,125 @@
+---
+title: useSpeechToText
+sidebar_position: 1
+---
+
+With the latest `v0.3.0` release we introduce a new hook - `useSpeechToText`. Speech to text is a task that allows you to transform spoken language into written text. It is commonly used to implement features such as transcription or voice assistants. As of now, [all supported STT models](#supported-models) run on the XNNPACK backend.
+
+:::info
+Currently, we do not support direct microphone input streaming to the model. Instead, in v0.3.0, we provide a way to transcribe an audio file.
+:::
+
+:::caution
+It is recommended to use models provided by us, which are available at our [Hugging Face repository](https://huggingface.co/software-mansion/react-native-executorch-moonshine-tiny). You can also use [constants](https://github.com/software-mansion/react-native-executorch/tree/main/src/constants/modelUrls.ts) shipped with our library.
+:::
+
+## Reference
+
+```typescript
+import { useSpeechToText } from 'react-native-executorch';
+
+const { transcribe, error, loadAudio } = useSpeechToText({
+ modelName: 'moonshine',
+});
+
+const audioUrl = ...; // URL with audio to transcribe
+
+await loadAudio(audioUrl);
+const transcription = await transcribe();
+if (error) {
+ console.log(error);
+} else {
+ console.log(transcription);
+}
+```
+
+### Streaming
+
+Given that STT models can only process audio up to 30 seconds long, the input audio needs to be chunked. Chunking audio may cut speech mid-sentence, which can be hard for the model to understand. To make it work, we employed an algorithm (adapted for mobile devices from [whisper-streaming](https://aclanthology.org/2023.ijcnlp-demo.3.pdf)) that uses overlapping audio chunks. This might introduce some overhead, but allows for processing audio inputs of arbitrary length.
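+
+The sketch below only illustrates the chunking idea and is not the library's actual implementation; the hook performs this internally, and the chunk and overlap lengths can be tuned with the `windowSize` and `overlapSeconds` arguments described below.
+
+```typescript
+const SAMPLE_RATE = 16000; // all inputs are 16 kHz waveforms
+
+// Split a waveform into overlapping chunks (assumes windowSeconds > overlapSeconds).
+function chunkWaveform(
+  waveform: number[],
+  windowSeconds: number,
+  overlapSeconds: number
+): number[][] {
+  const windowSamples = windowSeconds * SAMPLE_RATE;
+  const stepSamples = (windowSeconds - overlapSeconds) * SAMPLE_RATE;
+  const chunks: number[][] = [];
+  for (let start = 0; start < waveform.length; start += stepSamples) {
+    chunks.push(waveform.slice(start, start + windowSamples));
+  }
+  return chunks;
+}
+```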
+
+### Arguments
+
+**`modelName`**
+A literal of `"moonshine" | "whisper"`, which serves as an identifier for which model should be used.
+
+**`encoderSource?`**
+A string that specifies the location of a `.pte` file for the encoder. For further information on passing model sources, check out [Loading Models](https://docs.swmansion.com/react-native-executorch/docs/fundamentals/loading-models). Defaults to [constants](https://github.com/software-mansion/react-native-executorch/blob/main/src/constants/modelUrls.ts) for the given model.
+
+**`decoderSource?`**
+Analogous to `encoderSource`, this takes in a string which is a source for the decoder part of the model. Defaults to [constants](https://github.com/software-mansion/react-native-executorch/blob/main/src/constants/modelUrls.ts) for the given model.
+
+**`tokenizerSource?`**
+A string that specifies the location of the tokenizer for the model. This works just as the encoder and decoder do. Defaults to [constants](https://github.com/software-mansion/react-native-executorch/blob/main/src/constants/modelUrls.ts) for the given model.
+
+**`overlapSeconds?`**
+Specifies the length of overlap between consecutive audio chunks (expressed in seconds).
+
+**`windowSize?`**
+Specifies the size of each audio chunk (expressed in seconds).
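+
+A minimal sketch of configuring these optional arguments when setting up the hook (the URLs and values below are placeholders, not recommended settings):
+
+```typescript
+import { useSpeechToText } from 'react-native-executorch';
+
+const { transcribe, loadAudio, error } = useSpeechToText({
+  modelName: 'whisper',
+  // Placeholder sources - see the Loading Models page for real ones
+  encoderSource: 'https://www.your-url.com/whisper-encoder.pte',
+  decoderSource: 'https://www.your-url.com/whisper-decoder.pte',
+  tokenizerSource: 'https://www.your-url.com/whisper-tokenizer.json',
+  // Chunking configuration (in seconds) - placeholder values
+  windowSize: 10,
+  overlapSeconds: 2,
+});
+```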
+
+### Returns
+
+| Field | Type | Description |
+| ------------------ | --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `transcribe`       | `(input?: number[]) => Promise<string>` | Starts a transcription process for a given input array, which should be a waveform at 16 kHz. When no input is provided, it uses the internal state set by calling `loadAudio`. Resolves a promise with the output transcription when the model finishes. |
+| `loadAudio`        | `(url: string) => void` | Loads an audio file from the given URL and sets an internal state that serves as the input to `transcribe()`. |
+| `error`            | `Error \| undefined` | Contains the error message if the model failed to load. |
+| `sequence`         | `string` | This property is updated with each generated token. If you're looking to obtain tokens as they're generated, you should use this property. |
+| `isGenerating` | `boolean` | Indicates whether the model is currently processing an inference. |
+| `isReady` | `boolean` | Indicates whether the model has successfully loaded and is ready for inference. |
+| `downloadProgress` | `number` | Tracks the progress of the model download process. |
+
+## Running the model
+
+Before running the model's `transcribe` method, make sure to obtain the waveform of the audio you wish to transcribe. You can either use the `loadAudio` method to fetch the audio from a URL and store it in the model's internal state, or obtain the waveform on your own (remember to use a sampling rate of 16 kHz!). In the latter case, simply pass the obtained waveform as an argument to the `transcribe` method, which returns a promise that resolves to the transcription when successful. If the model fails during inference, the `error` property contains the details of the error. If you want to obtain tokens in a streaming fashion, you can also use the `sequence` property, which is updated with each generated token, similar to the [useLLM](../llms/useLLM.md) hook.
+
+## Example
+
+```typescript
+import { Button, Text } from 'react-native';
+import { useSpeechToText } from 'react-native-executorch';
+
+function App() {
+ const { loadAudio, transcribe, sequence, error } = useSpeechToText({
+ modelName: 'whisper',
+ });
+
+ const audioUrl = ...; // URL with audio to transcribe
+
+  const handleTranscribe = async () => {
+    // Load the audio into the hook's internal state, then run the model
+    await loadAudio(audioUrl);
+    await transcribe();
+  };
+
+  return (
+    <>
+      <Button onPress={handleTranscribe} title="Transcribe" />
+      <Text>{error ? String(error) : sequence}</Text>
+    </>
+  );
+}
+```
+
+## Supported models
+
+| Model | Language |
+| --------------------------------------------------------------------- | -------- |
+| [Whisper tiny.en](https://huggingface.co/openai/whisper-tiny.en) | English |
+| [Moonshine tiny](https://huggingface.co/UsefulSensors/moonshine-tiny) | English |
+
+## Benchmarks
+
+### Model size
+
+| Model | XNNPACK [MB] |
+| -------------- | ------------ |
+| WHISPER_TINY | 231.0 |
+| MOONSHINE_TINY | 148.9 |
+
+### Memory usage
+
+| Model | Android (XNNPACK) [MB] | iOS (XNNPACK) [MB] |
+| -------------- | ---------------------- | ------------------ |
+| WHISPER_TINY | 900 | 600 |
+| MOONSHINE_TINY | 650 | 560 |
diff --git a/docs/docs/utils/_category_.json b/docs/docs/utils/_category_.json
index 4bbbc17380..fe7e29fe8c 100644
--- a/docs/docs/utils/_category_.json
+++ b/docs/docs/utils/_category_.json
@@ -1,6 +1,6 @@
{
"label": "Utils",
- "position": 6,
+ "position": 7,
"link": {
"type": "generated-index"
}