2 changes: 2 additions & 0 deletions .cspell-wordlist.txt
@@ -53,3 +53,5 @@ QINT
 FNUZ
 wordlist
 jitpack
+coreml
+mobilenetv
4 changes: 1 addition & 3 deletions apps/computer-vision/app/classification/index.tsx
@@ -13,9 +13,7 @@ export default function ClassificationScreen() {
   );
   const [imageUri, setImageUri] = useState('');

-  const model = useClassification({
-    modelSource: EFFICIENTNET_V2_S,
-  });
+  const model = useClassification({ model: EFFICIENTNET_V2_S });
   const { setGlobalGenerating } = useContext(GeneratingContext);
   useEffect(() => {
     setGlobalGenerating(model.isGenerating);
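For reference, a minimal sketch of how the consolidated option reads in a consumer screen; the same single `model` key is used by `useImageSegmentation`, `useObjectDetection`, and `useStyleTransfer` in the hunks below. `isReady` and `forward` are assumed from the library's existing hook surface rather than shown in this diff:

```tsx
import { useClassification, EFFICIENTNET_V2_S } from 'react-native-executorch';

function ClassificationExample({ imageUri }: { imageUri: string }) {
  // Single `model` option replaces the old `modelSource` key.
  const model = useClassification({ model: EFFICIENTNET_V2_S });

  const classify = async () => {
    if (!model.isReady) return; // assumed readiness flag
    const scores = await model.forward(imageUri); // assumed label -> probability map
    console.log(scores);
  };

  return null; // wire `classify` to a button in a real screen
}
```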
4 changes: 1 addition & 3 deletions apps/computer-vision/app/image_segmentation/index.tsx
@@ -61,9 +61,7 @@ const numberToColor: number[][] = [
 ];

 export default function ImageSegmentationScreen() {
-  const model = useImageSegmentation({
-    modelSource: DEEPLAB_V3_RESNET50,
-  });
+  const model = useImageSegmentation({ model: DEEPLAB_V3_RESNET50 });
   const { setGlobalGenerating } = useContext(GeneratingContext);
   useEffect(() => {
     setGlobalGenerating(model.isGenerating);
4 changes: 1 addition & 3 deletions apps/computer-vision/app/object_detection/index.tsx
@@ -20,9 +20,7 @@ export default function ObjectDetectionScreen() {
     height: number;
   }>();

-  const ssdLite = useObjectDetection({
-    modelSource: SSDLITE_320_MOBILENET_V3_LARGE,
-  });
+  const ssdLite = useObjectDetection({ model: SSDLITE_320_MOBILENET_V3_LARGE });
   const { setGlobalGenerating } = useContext(GeneratingContext);
   useEffect(() => {
     setGlobalGenerating(ssdLite.isGenerating);
18 changes: 2 additions & 16 deletions apps/computer-vision/app/ocr/index.tsx
@@ -1,13 +1,7 @@
 import Spinner from 'react-native-loading-spinner-overlay';
 import { BottomBar } from '../../components/BottomBar';
 import { getImage } from '../../utils';
-import {
-  DETECTOR_CRAFT_800,
-  RECOGNIZER_EN_CRNN_128,
-  RECOGNIZER_EN_CRNN_256,
-  RECOGNIZER_EN_CRNN_512,
-  useOCR,
-} from 'react-native-executorch';
+import { useOCR, OCR_ENGLISH } from 'react-native-executorch';
 import { View, StyleSheet, Image, Text, ScrollView } from 'react-native';
 import ImageWithBboxes2 from '../../components/ImageWithOCRBboxes';
 import React, { useContext, useEffect, useState } from 'react';
@@ -22,15 +16,7 @@ export default function OCRScreen() {
     height: number;
   }>();

-  const model = useOCR({
-    detectorSource: DETECTOR_CRAFT_800,
-    recognizerSources: {
-      recognizerLarge: RECOGNIZER_EN_CRNN_512,
-      recognizerMedium: RECOGNIZER_EN_CRNN_256,
-      recognizerSmall: RECOGNIZER_EN_CRNN_128,
-    },
-    language: 'en',
-  });
+  const model = useOCR({ model: OCR_ENGLISH });
   const { setGlobalGenerating } = useContext(GeneratingContext);
   useEffect(() => {
     setGlobalGenerating(model.isGenerating);
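The `OCR_ENGLISH` constant bundles the detector and the three recognizer sources (plus the language) that were previously passed one by one. A minimal sketch of the resulting call; `forward` and the detection shape are assumed from the existing hook API, not shown in this diff:

```tsx
import { useOCR, OCR_ENGLISH } from 'react-native-executorch';

function OcrExample({ imageUri }: { imageUri: string }) {
  // One bundled constant instead of detectorSource / recognizerSources / language.
  const ocr = useOCR({ model: OCR_ENGLISH });

  const readText = async () => {
    const detections = await ocr.forward(imageUri); // assumed: [{ text, bbox, score }, ...]
    detections.forEach((d) => console.log(d.text));
  };

  return null;
}
```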
18 changes: 2 additions & 16 deletions apps/computer-vision/app/ocr_vertical/index.tsx
@@ -1,13 +1,7 @@
 import Spinner from 'react-native-loading-spinner-overlay';
 import { BottomBar } from '../../components/BottomBar';
 import { getImage } from '../../utils';
-import {
-  DETECTOR_CRAFT_1280,
-  DETECTOR_CRAFT_320,
-  RECOGNIZER_EN_CRNN_512,
-  RECOGNIZER_EN_CRNN_64,
-  useVerticalOCR,
-} from 'react-native-executorch';
+import { useVerticalOCR, VERTICAL_OCR_ENGLISH } from 'react-native-executorch';
 import { View, StyleSheet, Image, Text, ScrollView } from 'react-native';
 import ImageWithBboxes2 from '../../components/ImageWithOCRBboxes';
 import React, { useContext, useEffect, useState } from 'react';
@@ -22,15 +16,7 @@ export default function VerticalOCRScree() {
     height: number;
   }>();
   const model = useVerticalOCR({
-    detectorSources: {
-      detectorLarge: DETECTOR_CRAFT_1280,
-      detectorNarrow: DETECTOR_CRAFT_320,
-    },
-    recognizerSources: {
-      recognizerLarge: RECOGNIZER_EN_CRNN_512,
-      recognizerSmall: RECOGNIZER_EN_CRNN_64,
-    },
-    language: 'en',
+    model: VERTICAL_OCR_ENGLISH,
     independentCharacters: true,
   });
   const { setGlobalGenerating } = useContext(GeneratingContext);
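Likewise, `VERTICAL_OCR_ENGLISH` replaces the explicit detector/recognizer pairs, while `independentCharacters` stays a separate flag. A short sketch (the flag's per-character semantics are assumed, not stated in this diff):

```tsx
const model = useVerticalOCR({
  model: VERTICAL_OCR_ENGLISH, // bundles detectorSources + recognizerSources + language
  independentCharacters: true, // assumed: recognize characters individually rather than as whole words
});
```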
4 changes: 1 addition & 3 deletions apps/computer-vision/app/style_transfer/index.tsx
@@ -11,9 +11,7 @@ import { GeneratingContext } from '../../context';
 import ScreenWrapper from '../../ScreenWrapper';

 export default function StyleTransferScreen() {
-  const model = useStyleTransfer({
-    modelSource: STYLE_TRANSFER_CANDY,
-  });
+  const model = useStyleTransfer({ model: STYLE_TRANSFER_CANDY });
   const { setGlobalGenerating } = useContext(GeneratingContext);
   useEffect(() => {
     setGlobalGenerating(model.isGenerating);
13 changes: 2 additions & 11 deletions apps/llm/app/llm/index.tsx
@@ -12,12 +12,7 @@ import {
 } from 'react-native';
 import SendIcon from '../../assets/icons/send_icon.svg';
 import Spinner from 'react-native-loading-spinner-overlay';
-import {
-  LLAMA3_2_1B_QLORA,
-  LLAMA3_2_TOKENIZER,
-  LLAMA3_2_TOKENIZER_CONFIG,
-  useLLM,
-} from 'react-native-executorch';
+import { useLLM, LLAMA3_2_1B_QLORA } from 'react-native-executorch';
 import PauseIcon from '../../assets/icons/pause_icon.svg';
 import ColorPalette from '../../colors';
 import Messages from '../../components/Messages';
@@ -35,11 +30,7 @@ function LLMScreen() {
   const textInputRef = useRef<TextInput>(null);
   const { setGlobalGenerating } = useContext(GeneratingContext);

-  const llm = useLLM({
-    modelSource: LLAMA3_2_1B_QLORA,
-    tokenizerSource: LLAMA3_2_TOKENIZER,
-    tokenizerConfigSource: LLAMA3_2_TOKENIZER_CONFIG,
-  });
+  const llm = useLLM({ model: LLAMA3_2_1B_QLORA });

   useEffect(() => {
     if (llm.error) {
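The `LLAMA3_2_1B_QLORA` constant now carries the tokenizer and tokenizer config alongside the model weights, so the hook takes a single option; the tool-calling screen below follows the same pattern. A minimal usage sketch; `isReady`, `generate`, and `response` are assumed from the existing `useLLM` interface rather than shown here:

```tsx
import { useLLM, LLAMA3_2_1B_QLORA } from 'react-native-executorch';

function ChatExample() {
  const llm = useLLM({ model: LLAMA3_2_1B_QLORA }); // model + tokenizer + config in one constant

  const ask = async (question: string) => {
    if (!llm.isReady) return;                                   // assumed readiness flag
    await llm.generate([{ role: 'user', content: question }]);  // assumed message shape
    console.log(llm.response);                                  // assumed accumulated response field
  };

  return null;
}
```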
13 changes: 1 addition & 12 deletions apps/llm/app/llm_structured_output/index.tsx
@@ -13,8 +13,6 @@ import {
 import SendIcon from '../../assets/icons/send_icon.svg';
 import Spinner from 'react-native-loading-spinner-overlay';
 import {
-  QWEN3_TOKENIZER,
-  QWEN3_TOKENIZER_CONFIG,
   useLLM,
   fixAndValidateStructuredOutput,
   getStructuredOutputPrompt,
@@ -75,12 +73,7 @@ function LLMScreen() {
   const textInputRef = useRef<TextInput>(null);
   const { setGlobalGenerating } = useContext(GeneratingContext);

-  const llm = useLLM({
-    // try out 4B model it this one struggles with following structured output
-    modelSource: QWEN3_1_7B_QUANTIZED,
-    tokenizerSource: QWEN3_TOKENIZER,
-    tokenizerConfigSource: QWEN3_TOKENIZER_CONFIG,
-  });
+  const llm = useLLM({ model: QWEN3_1_7B_QUANTIZED }); // try out 4B model if 1.7B struggles with following structured output

   useEffect(() => {
     setGlobalGenerating(llm.isGenerating);
@@ -89,10 +82,6 @@
   const { configure } = llm;
   useEffect(() => {
     const formattingInstructions = getStructuredOutputPrompt(responseSchema);
-    // const formattingInstructionsWithZod = getStructuredOutputPrompt(
-    //   responseSchemaWithZod
-    // );
-
     const prompt = `Your goal is to parse user's messages and return them in JSON format. Don't respond to user. Simply return JSON with user's question parsed. \n${formattingInstructions}\n /no_think`;

     configure({
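For context, this screen builds its system prompt from the response schema and applies it through `configure`, roughly as sketched below; the `chatConfig.systemPrompt` option shape is an assumption about `configure`'s argument, since the hunk above is truncated right after `configure({`:

```tsx
const formattingInstructions = getStructuredOutputPrompt(responseSchema);

configure({
  chatConfig: {
    // assumed option shape, not confirmed by this diff
    systemPrompt: `Parse the user's message and return only JSON.\n${formattingInstructions}\n /no_think`,
  },
});
```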
10 changes: 2 additions & 8 deletions apps/llm/app/llm_tool_calling/index.tsx
@@ -14,11 +14,9 @@ import SWMIcon from '../../assets/icons/swm_icon.svg';
 import SendIcon from '../../assets/icons/send_icon.svg';
 import Spinner from 'react-native-loading-spinner-overlay';
 import {
-  HAMMER2_1_1_5B,
-  HAMMER2_1_TOKENIZER,
-  HAMMER2_1_TOKENIZER_CONFIG,
   useLLM,
   DEFAULT_SYSTEM_PROMPT,
+  HAMMER2_1_1_5B_QUANTIZED,
 } from 'react-native-executorch';
 import PauseIcon from '../../assets/icons/pause_icon.svg';
 import ColorPalette from '../../colors';
@@ -41,11 +39,7 @@ function LLMToolCallingScreen() {
   const textInputRef = useRef<TextInput>(null);
   const { setGlobalGenerating } = useContext(GeneratingContext);

-  const llm = useLLM({
-    modelSource: HAMMER2_1_1_5B,
-    tokenizerSource: HAMMER2_1_TOKENIZER,
-    tokenizerConfigSource: HAMMER2_1_TOKENIZER_CONFIG,
-  });
+  const llm = useLLM({ model: HAMMER2_1_1_5B_QUANTIZED });

   useEffect(() => {
     setGlobalGenerating(llm.isGenerating);
11 changes: 3 additions & 8 deletions apps/llm/app/voice_chat/index.tsx
@@ -16,8 +16,7 @@ import {
   useSpeechToText,
   useLLM,
   QWEN3_0_6B_QUANTIZED,
-  QWEN3_TOKENIZER,
-  QWEN3_TOKENIZER_CONFIG,
+  MOONSHINE_TINY,
 } from 'react-native-executorch';
 import PauseIcon from '../../assets/icons/pause_icon.svg';
 import MicIcon from '../../assets/icons/mic_icon.svg';
@@ -68,13 +67,9 @@ function VoiceChatScreen() {
   const messageRecorded = useRef<boolean>(false);
   const { setGlobalGenerating } = useContext(GeneratingContext);

-  const llm = useLLM({
-    modelSource: QWEN3_0_6B_QUANTIZED,
-    tokenizerSource: QWEN3_TOKENIZER,
-    tokenizerConfigSource: QWEN3_TOKENIZER_CONFIG,
-  });
+  const llm = useLLM({ model: QWEN3_0_6B_QUANTIZED });
   const speechToText = useSpeechToText({
-    modelName: 'moonshine',
+    model: MOONSHINE_TINY,
     windowSize: 3,
     overlapSeconds: 1.2,
   });
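Both hooks in the voice-chat screen now take a bundled constant, with `useSpeechToText` switching from the `modelName: 'moonshine'` string to `MOONSHINE_TINY`. A rough sketch of how they chain; `transcribe`'s waveform argument and `generate`'s message shape are assumed from the existing APIs:

```tsx
const llm = useLLM({ model: QWEN3_0_6B_QUANTIZED });
const speechToText = useSpeechToText({
  model: MOONSHINE_TINY, // was modelName: 'moonshine'
  windowSize: 3,
  overlapSeconds: 1.2,
});

// Assumed glue: run the recorded samples through STT, then hand the transcript to the LLM.
const onRecordingDone = async (waveform: number[]) => {
  const transcript = await speechToText.transcribe(waveform);
  await llm.generate([{ role: 'user', content: transcript }]);
};
```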
4 changes: 2 additions & 2 deletions apps/speech-to-text/screens/SpeechToTextScreen.tsx
@@ -1,4 +1,4 @@
-import { useSpeechToText } from 'react-native-executorch';
+import { MOONSHINE_TINY, useSpeechToText } from 'react-native-executorch';
 import React from 'react';
 import {
   Text,
@@ -54,7 +54,7 @@ export const SpeechToTextScreen = () => {
     sequence,
     error,
     transcribe,
-  } = useSpeechToText({ modelName: 'moonshine', streamingConfig: 'balanced' });
+  } = useSpeechToText({ model: MOONSHINE_TINY });

   const loadAudio = async (url: string) => {
     const audioContext = new AudioContext({ sampleRate: 16e3 });
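Note that the `streamingConfig: 'balanced'` option is dropped here along with the `modelName` string. A minimal sketch of feeding decoded audio into `transcribe`, following the `loadAudio` helper visible above; the `decodeAudioData`/`getChannelData` calls are assumed from the Web Audio-style API this screen already uses:

```tsx
const { transcribe } = useSpeechToText({ model: MOONSHINE_TINY });

const transcribeUrl = async (url: string) => {
  const audioContext = new AudioContext({ sampleRate: 16e3 });
  const raw = await fetch(url).then((response) => response.arrayBuffer());
  const decoded = await audioContext.decodeAudioData(raw); // assumed Web Audio-style decode
  return transcribe(Array.from(decoded.getChannelData(0))); // assumed: mono samples as number[]
};
```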
4 changes: 2 additions & 2 deletions apps/text-embeddings/app/clip-embeddings/index.tsx
@@ -28,8 +28,8 @@ export default function ClipEmbeddingsScreenWrapper() {
 }

 function ClipEmbeddingsScreen() {
-  const textModel = useTextEmbeddings(CLIP_VIT_BASE_PATCH32_TEXT);
-  const imageModel = useImageEmbeddings(CLIP_VIT_BASE_PATCH32_IMAGE);
+  const textModel = useTextEmbeddings({ model: CLIP_VIT_BASE_PATCH32_TEXT });
+  const imageModel = useImageEmbeddings({ model: CLIP_VIT_BASE_PATCH32_IMAGE });

   const [inputSentence, setInputSentence] = useState('');
   const [sentencesWithEmbeddings, setSentencesWithEmbeddings] = useState<
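The CLIP hooks move from a bare positional argument to the same `{ model: ... }` options object. Since the paired text and image encoders share an embedding space, a comparison sketch might look like this; `forward` on both hooks and the app's `dotProduct` utility are assumptions about the existing code, not part of this diff:

```tsx
const textModel = useTextEmbeddings({ model: CLIP_VIT_BASE_PATCH32_TEXT });
const imageModel = useImageEmbeddings({ model: CLIP_VIT_BASE_PATCH32_IMAGE });

// Assumed: both forward() calls resolve to number[] vectors of the same length.
const matchCaption = async (imageUri: string, caption: string) => {
  const imageEmbedding = await imageModel.forward(imageUri);
  const textEmbedding = await textModel.forward(caption);
  return dotProduct(imageEmbedding, textEmbedding); // higher score = better caption match
};
```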
11 changes: 2 additions & 9 deletions apps/text-embeddings/app/text-embeddings/index.tsx
@@ -11,11 +11,7 @@ import {
   Platform,
 } from 'react-native';
 import { Ionicons } from '@expo/vector-icons';
-import {
-  useTextEmbeddings,
-  ALL_MINILM_L6_V2,
-  ALL_MINILM_L6_V2_TOKENIZER,
-} from 'react-native-executorch';
+import { useTextEmbeddings, ALL_MINILM_L6_V2 } from 'react-native-executorch';
 import { useIsFocused } from '@react-navigation/native';
 import { dotProduct } from '../../utils/math';

@@ -26,10 +22,7 @@ export default function TextEmbeddingsScreenWrapper() {
 }

 function TextEmbeddingsScreen() {
-  const model = useTextEmbeddings({
-    modelSource: ALL_MINILM_L6_V2,
-    tokenizerSource: ALL_MINILM_L6_V2_TOKENIZER,
-  });
+  const model = useTextEmbeddings({ model: ALL_MINILM_L6_V2 });

   const [inputSentence, setInputSentence] = useState('');
   const [sentencesWithEmbeddings, setSentencesWithEmbeddings] = useState<
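As with the LLM hooks, `ALL_MINILM_L6_V2` now bundles its tokenizer, so the separate `ALL_MINILM_L6_V2_TOKENIZER` source disappears. A short similarity sketch using the `dotProduct` helper this screen already imports; `forward`'s signature and the normalization of the returned vectors are assumptions:

```tsx
const model = useTextEmbeddings({ model: ALL_MINILM_L6_V2 });

const similarity = async (a: string, b: string) => {
  const embeddingA = await model.forward(a); // assumed: Promise<number[]>
  const embeddingB = await model.forward(b);
  // If the embeddings are L2-normalized (assumed), the dot product equals cosine similarity.
  return dotProduct(embeddingA, embeddingB);
};
```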