-void inverseRotatePoints(std::array &points,
- const FrameOrientation &orient, cv::Size rotatedSize) {
+concept Point2D = requires(P &p) {
+ requires std::is_arithmetic_v>;
+ requires std::is_arithmetic_v>;
+};
+
+/**
+ * @brief Map a sequence of points from rotated-frame space back to screen
+ * space. Inverse of rotateFrameForModel for a collection of points.
+ *
+ * Works on any iterable whose elements satisfy {@link Point2D}
+ * (e.g. `std::array<P, N>`, `std::vector<P>`).
+ * rotatedSize is the rotated frame size (rotated.size()).
+ */
+template
+ requires Point2D
+void inverseRotatePoints(Points &points, const FrameOrientation &orient,
+ cv::Size rotatedSize) {
const float w = static_cast(rotatedSize.width);
const float h = static_cast(rotatedSize.height);
+ using Coord = decltype(std::declval().begin()->x);
+
for (auto &p : points) {
- float x = p.x;
- float y = p.y;
+ float x = static_cast(p.x);
+ float y = static_cast(p.y);
switch (orient.orientation) {
case Orientation::Up:
// landscape-left → portrait: nx = h-y, ny = x
- p.x = h - y;
- p.y = x;
+ p.x = static_cast(h - y);
+ p.y = static_cast(x);
break;
case Orientation::Right:
- // upside-down portrait → portrait: nx = w-x, ny = h-y
- p.x = w - x;
- p.y = h - y;
+#if defined(__APPLE__)
+ // iOS upside-down portrait → portrait: nx = w-x, ny = h-y
+ p.x = static_cast(w - x);
+ p.y = static_cast(h - y);
+#endif
+ // Android front-cam upright portrait: rotated frame already in
+ // screen space (mirror-selfie portrait), no inverse needed.
break;
case Orientation::Down:
// landscape-right → portrait: nx = y, ny = w-x
- p.x = y;
- p.y = w - x;
+ p.x = static_cast(y);
+ p.y = static_cast(w - x);
break;
case Orientation::Left:
break;
@@ -105,8 +125,8 @@ void inverseRotatePoints(std::array &points,
float sw = swapped ? h : w;
float sh = swapped ? w : h;
for (auto &p : points) {
- p.x = sw - p.x;
- p.y = sh - p.y;
+ p.x = static_cast(sw - static_cast(p.x));
+ p.y = static_cast(sh - static_cast(p.y));
}
}
#endif
diff --git a/packages/react-native-executorch/src/constants/modelUrls.ts b/packages/react-native-executorch/src/constants/modelUrls.ts
index 432f915eef..6fb20f9ca3 100644
--- a/packages/react-native-executorch/src/constants/modelUrls.ts
+++ b/packages/react-native-executorch/src/constants/modelUrls.ts
@@ -663,6 +663,17 @@ export const YOLO26X = {
modelSource: YOLO26X_DETECTION_MODEL,
} as const;
+// YOLO26 Pose Estimation
+const YOLO26N_POSE_MODEL = `${URL_PREFIX}-yolo26-pose/${NEXT_VERSION_TAG}/yolo26n/xnnpack/yolo26n-pose_xnnpack.pte`;
+
+/**
+ * @category Models - Pose Estimation
+ */
+export const YOLO26N_POSE = {
+ modelName: 'yolo26n-pose',
+ modelSource: YOLO26N_POSE_MODEL,
+} as const;
+
// Style transfer
const STYLE_TRANSFER_CANDY_MODEL =
Platform.OS === `ios`
diff --git a/packages/react-native-executorch/src/constants/poseEstimation.ts b/packages/react-native-executorch/src/constants/poseEstimation.ts
new file mode 100644
index 0000000000..6d3929e8ef
--- /dev/null
+++ b/packages/react-native-executorch/src/constants/poseEstimation.ts
@@ -0,0 +1,24 @@
+/**
+ * Standard COCO keypoint enum (17 keypoints).
+ * Use for type-safe keypoint access: `keypoints[CocoKeypoint.NOSE]`
+ * @category Types
+ */
+export enum CocoKeypoint {
+ NOSE = 0,
+ LEFT_EYE = 1,
+ RIGHT_EYE = 2,
+ LEFT_EAR = 3,
+ RIGHT_EAR = 4,
+ LEFT_SHOULDER = 5,
+ RIGHT_SHOULDER = 6,
+ LEFT_ELBOW = 7,
+ RIGHT_ELBOW = 8,
+ LEFT_WRIST = 9,
+ RIGHT_WRIST = 10,
+ LEFT_HIP = 11,
+ RIGHT_HIP = 12,
+ LEFT_KNEE = 13,
+ RIGHT_KNEE = 14,
+ LEFT_ANKLE = 15,
+ RIGHT_ANKLE = 16,
+}
diff --git a/packages/react-native-executorch/src/hooks/computer_vision/usePoseEstimation.ts b/packages/react-native-executorch/src/hooks/computer_vision/usePoseEstimation.ts
new file mode 100644
index 0000000000..2eda27deaa
--- /dev/null
+++ b/packages/react-native-executorch/src/hooks/computer_vision/usePoseEstimation.ts
@@ -0,0 +1,60 @@
+import {
+ PoseEstimationModule,
+ PoseEstimationKeypoints,
+} from '../../modules/computer_vision/PoseEstimationModule';
+import {
+ PoseEstimationModelSources,
+ PoseEstimationProps,
+ PoseEstimationType,
+ PoseEstimationOptions,
+} from '../../types/poseEstimation';
+import { PixelData } from '../../types/common';
+import { useModuleFactory } from '../useModuleFactory';
+
+/**
+ * React hook for managing a Pose Estimation model instance.
+ * @typeParam C - A {@link PoseEstimationModelSources} config specifying which built-in model to load.
+ * @category Hooks
+ * @param props - Configuration object containing `model` config and optional `preventLoad` flag.
+ * @returns An object with model state (`error`, `isReady`, `isGenerating`, `downloadProgress`) and typed `forward` and `runOnFrame` functions.
+ */
+export const usePoseEstimation = ({
+ model,
+ preventLoad = false,
+}: PoseEstimationProps): PoseEstimationType<
+ PoseEstimationKeypoints
+> => {
+ const {
+ error,
+ isReady,
+ isGenerating,
+ downloadProgress,
+ runForward,
+ runOnFrame,
+ instance,
+ } = useModuleFactory({
+ factory: (config, onProgress) =>
+ PoseEstimationModule.fromModelName(config, onProgress),
+ config: model,
+ deps: [model.modelName, model.modelSource],
+ preventLoad,
+ });
+
+ const forward = (
+ input: string | PixelData,
+ options?: PoseEstimationOptions
+ ) => runForward((inst) => inst.forward(input, options));
+
+ const getAvailableInputSizes = () =>
+ instance?.getAvailableInputSizes() ?? undefined;
+
+ return {
+ error,
+ isReady,
+ isGenerating,
+ downloadProgress,
+ forward,
+ runOnFrame,
+ getAvailableInputSizes,
+ };
+};
diff --git a/packages/react-native-executorch/src/index.ts b/packages/react-native-executorch/src/index.ts
index 7cc148d16b..96d167a7d2 100644
--- a/packages/react-native-executorch/src/index.ts
+++ b/packages/react-native-executorch/src/index.ts
@@ -58,6 +58,11 @@ declare global {
normStd: Triple | [],
labelNames: string[]
) => Promise;
+ var loadPoseEstimation: (
+ source: string,
+ normMean: Triple | [],
+ normStd: Triple | []
+ ) => Promise;
var loadExecutorchModule: (source: string) => Promise;
var loadTokenizerModule: (source: string) => Promise;
var loadImageEmbeddings: (source: string) => Promise;
@@ -124,6 +129,7 @@ if (
global.loadExecutorchModule == null ||
global.loadClassification == null ||
global.loadObjectDetection == null ||
+ global.loadPoseEstimation == null ||
global.loadTokenizerModule == null ||
global.loadTextEmbeddings == null ||
global.loadImageEmbeddings == null ||
@@ -165,6 +171,7 @@ export * from './hooks/computer_vision/useOCR';
export * from './hooks/computer_vision/useVerticalOCR';
export * from './hooks/computer_vision/useImageEmbeddings';
export * from './hooks/computer_vision/useTextToImage';
+export * from './hooks/computer_vision/usePoseEstimation';
export * from './hooks/natural_language_processing/useLLM';
export * from './hooks/natural_language_processing/useSpeechToText';
@@ -186,6 +193,7 @@ export * from './modules/computer_vision/OCRModule';
export * from './modules/computer_vision/VerticalOCRModule';
export * from './modules/computer_vision/ImageEmbeddingsModule';
export * from './modules/computer_vision/TextToImageModule';
+export * from './modules/computer_vision/PoseEstimationModule';
export * from './modules/natural_language_processing/LLMModule';
export * from './modules/natural_language_processing/SpeechToTextModule';
@@ -223,6 +231,7 @@ export * from './types/classification';
export * from './types/imageEmbeddings';
export * from './types/styleTransfer';
export * from './types/tti';
+export * from './types/poseEstimation';
// constants
export * from './constants/commonVision';
@@ -232,6 +241,7 @@ export * from './constants/ocr/models';
export * from './constants/tts/models';
export * from './constants/tts/voices';
export * from './constants/llmDefaults';
+export * from './constants/poseEstimation';
export { RnExecutorchError } from './errors/errorUtils';
export { RnExecutorchErrorCode } from './errors/ErrorCodes';
diff --git a/packages/react-native-executorch/src/modules/computer_vision/PoseEstimationModule.ts b/packages/react-native-executorch/src/modules/computer_vision/PoseEstimationModule.ts
new file mode 100644
index 0000000000..ff2b68b1fd
--- /dev/null
+++ b/packages/react-native-executorch/src/modules/computer_vision/PoseEstimationModule.ts
@@ -0,0 +1,328 @@
+import {
+ Frame,
+ LabelEnum,
+ PixelData,
+ ResourceSource,
+} from '../../types/common';
+import {
+ Keypoint,
+ PersonKeypoints,
+ PoseDetections,
+ PoseEstimationOptions,
+ PoseEstimationModelSources,
+ PoseEstimationModelName,
+ PoseEstimationConfig,
+} from '../../types/poseEstimation';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';
+import { VisionModule } from './VisionModule';
+import { fetchModelPath } from './VisionLabeledModule';
+import { CocoKeypoint } from '../../constants/poseEstimation';
+import { ResolveConfigOrType } from '../../types/computerVision';
+
+const YOLO_POSE_CONFIG = {
+ keypointMap: CocoKeypoint,
+ preprocessorConfig: undefined,
+ availableInputSizes: [384, 512, 640] as const,
+ defaultInputSize: 384,
+ defaultDetectionThreshold: 0.5,
+ defaultKeypointThreshold: 0.5,
+} satisfies PoseEstimationConfig;
+
+const ModelConfigs = {
+ 'yolo26n-pose': YOLO_POSE_CONFIG,
+} as const satisfies Record<
+ PoseEstimationModelName,
+ PoseEstimationConfig
+>;
+
+type ModelConfigsType = typeof ModelConfigs;
+
+/**
+ * Resolves the {@link LabelEnum} for a given built-in pose estimation model name.
+ * @typeParam M - A built-in model name from {@link PoseEstimationModelName}.
+ * @category Types
+ */
+export type PoseEstimationKeypoints =
+ (typeof ModelConfigs)[M]['keypointMap'];
+
+type ModelNameOf = C['modelName'];
+
+/** @internal */
+type ResolveKeypoints =
+ ResolveConfigOrType;
+
+function mapPersonKeypoints(
+ raw: Keypoint[][],
+ entries: [string, number][],
+ maxIndex: number
+): PersonKeypoints[] {
+ 'worklet';
+ if (raw.length > 0 && raw[0]!.length <= maxIndex) {
+ throw new Error(
+ `Keypoint map references index ${maxIndex} but model returned ${raw[0]!.length} keypoints per person — keypointMap is incompatible with this model.`
+ );
+ }
+ const out: PersonKeypoints[] = [];
+ for (const person of raw) {
+ const named: Record = {};
+ for (const [name, idx] of entries) named[name] = person[idx]!;
+ out.push(named as PersonKeypoints);
+ }
+ return out;
+}
+
+/**
+ * Pose estimation module for detecting human body keypoints.
+ * @typeParam T - Either a built-in model name (e.g. `'yolo26n-pose'`)
+ * or a custom {@link LabelEnum} keypoint map.
+ * @category Typescript API
+ */
+export class PoseEstimationModule<
+ T extends PoseEstimationModelName | LabelEnum,
+> extends VisionModule>> {
+ private readonly keypointMap: ResolveKeypoints;
+ private readonly modelConfig: PoseEstimationConfig;
+ // Numeric TS enums double-list
+ // their keys at runtime (value → name); we keep only the (name, index) pairs
+ private readonly keypointEntries: [string, number][];
+ private readonly maxKeypointIndex: number;
+
+ private constructor(
+ keypointMap: ResolveKeypoints,
+ modelConfig: PoseEstimationConfig,
+ nativeModule: unknown
+ ) {
+ super();
+ this.keypointMap = keypointMap;
+ this.modelConfig = modelConfig;
+ this.nativeModule = nativeModule;
+ this.keypointEntries = [];
+ for (const [name, value] of Object.entries(keypointMap)) {
+ if (typeof value === 'number') this.keypointEntries.push([name, value]);
+ }
+ this.maxKeypointIndex = Math.max(...this.keypointEntries.map(([, v]) => v));
+ }
+
+ /**
+ * Creates a pose estimation instance for a built-in model.
+ * @param namedSources - A {@link PoseEstimationModelSources} object specifying which model to load.
+ * @param onDownloadProgress - Optional callback to monitor download progress (0-1).
+ * @returns A Promise resolving to a `PoseEstimationModule` instance typed to the model's keypoint map.
+ */
+ static async fromModelName(
+ namedSources: C,
+ onDownloadProgress: (progress: number) => void = () => {}
+ ): Promise>> {
+ const { modelSource } = namedSources;
+ const modelConfig = ModelConfigs[
+ namedSources.modelName
+ ] as PoseEstimationConfig;
+ const { keypointMap, preprocessorConfig } = modelConfig;
+ const normMean = preprocessorConfig?.normMean ?? [];
+ const normStd = preprocessorConfig?.normStd ?? [];
+
+ const modelPath = await fetchModelPath(modelSource, onDownloadProgress);
+ const nativeModule = await global.loadPoseEstimation(
+ modelPath,
+ normMean,
+ normStd
+ );
+
+ return new PoseEstimationModule>(
+ keypointMap as ResolveKeypoints>,
+ modelConfig,
+ nativeModule
+ );
+ }
+
+ /**
+ * Creates a pose estimation instance with a user-provided model binary and keypoint map.
+ * Use this when working with a custom-exported model that is not one of the built-in presets.
+ * @param modelSource - A fetchable resource pointing to the model binary.
+ * @param config - A {@link PoseEstimationConfig} object with the keypoint map and optional preprocessing parameters.
+ * @param onDownloadProgress - Optional callback to monitor download progress (0-1).
+ * @returns A Promise resolving to a `PoseEstimationModule` instance typed to the provided keypoint map.
+ */
+ static async fromCustomModel(
+ modelSource: ResourceSource,
+ config: PoseEstimationConfig,
+ onDownloadProgress: (progress: number) => void = () => {}
+ ): Promise> {
+ const { keypointMap, preprocessorConfig } = config;
+ const normMean = preprocessorConfig?.normMean ?? [];
+ const normStd = preprocessorConfig?.normStd ?? [];
+
+ const modelPath = await fetchModelPath(modelSource, onDownloadProgress);
+ const nativeModule = await global.loadPoseEstimation(
+ modelPath,
+ normMean,
+ normStd
+ );
+
+ return new PoseEstimationModule(
+ keypointMap as ResolveKeypoints,
+ config,
+ nativeModule
+ );
+ }
+
+ /**
+ * Get the keypoint map for this model.
+ * @returns Map of keypoint names to indices, e.g. `{ NOSE: 0, LEFT_EYE: 1, ... }`.
+ */
+ getKeypointMap(): ResolveKeypoints {
+ return this.keypointMap;
+ }
+
+ /**
+ * Returns the available input sizes for this model, or undefined if the model accepts any size.
+ * @returns a readonly number[] specifying what input sizes the model supports.
+ */
+ getAvailableInputSizes(): readonly number[] | undefined {
+ return this.modelConfig.availableInputSizes;
+ }
+
+ /**
+ * Override runOnFrame to provide an options-based API for VisionCamera integration.
+ * @returns A worklet function for frame processing.
+ */
+ override get runOnFrame(): (
+ frame: Frame,
+ isFrontCamera: boolean,
+ options?: PoseEstimationOptions
+ ) => PoseDetections> {
+ if (!this.nativeModule) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'Model is not loaded. Ensure the model has been loaded before using runOnFrame.'
+ );
+ }
+
+ const nativeGenerateFromFrame = this.nativeModule.generateFromFrame;
+ const defaultDetectionThreshold =
+ this.modelConfig.defaultDetectionThreshold ?? 0.5;
+ const defaultKeypointThreshold =
+ this.modelConfig.defaultKeypointThreshold ?? 0.5;
+ const defaultInputSize = this.modelConfig.defaultInputSize;
+ const availableInputSizes = this.modelConfig.availableInputSizes;
+ const keypointEntries = this.keypointEntries;
+ const maxKeypointIndex = this.maxKeypointIndex;
+ return (
+ frame: Frame,
+ isFrontCamera: boolean,
+ options?: PoseEstimationOptions
+ ): PoseDetections> => {
+ 'worklet';
+
+ const detectionThreshold =
+ options?.detectionThreshold ?? defaultDetectionThreshold;
+ const keypointThreshold =
+ options?.keypointThreshold ?? defaultKeypointThreshold;
+ const inputSize = options?.inputSize ?? defaultInputSize;
+
+ // Validate inputSize
+ if (
+ availableInputSizes &&
+ inputSize !== undefined &&
+ !availableInputSizes.includes(inputSize)
+ ) {
+ throw new Error(
+ `Invalid inputSize: ${inputSize}. Available sizes: ${availableInputSizes.join(', ')}`
+ );
+ }
+
+ const methodName =
+ inputSize !== undefined ? `forward_${inputSize}` : 'forward';
+
+ let nativeBuffer: { pointer: bigint; release(): void } | null = null;
+ try {
+ nativeBuffer = frame.getNativeBuffer();
+ const frameData = {
+ nativeBuffer: nativeBuffer.pointer,
+ orientation: frame.orientation,
+ isMirrored: isFrontCamera,
+ };
+ const raw: Keypoint[][] = nativeGenerateFromFrame(
+ frameData,
+ detectionThreshold,
+ keypointThreshold,
+ methodName
+ );
+ return mapPersonKeypoints>(
+ raw,
+ keypointEntries,
+ maxKeypointIndex
+ );
+ } finally {
+ if (nativeBuffer?.release) {
+ nativeBuffer.release();
+ }
+ }
+ };
+ }
+
+ /**
+ * Run pose estimation on an image.
+ * @param input - Image path/URI or PixelData
+ * @param options - Detection options including inputSize for multi-method models
+ * @returns Array of detected people, each with keypoints accessible via the keypoint enum
+ */
+ override async forward(
+ input: string | PixelData,
+ options?: PoseEstimationOptions
+ ): Promise>> {
+ if (this.nativeModule == null) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'Model not loaded. Please load the model before calling forward().'
+ );
+ }
+
+ const detectionThreshold =
+ options?.detectionThreshold ??
+ this.modelConfig.defaultDetectionThreshold ??
+ 0.5;
+ const keypointThreshold =
+ options?.keypointThreshold ??
+ this.modelConfig.defaultKeypointThreshold ??
+ 0.5;
+ const inputSize = options?.inputSize ?? this.modelConfig.defaultInputSize;
+
+ // Validate inputSize against availableInputSizes
+ if (
+ this.modelConfig.availableInputSizes &&
+ inputSize !== undefined &&
+ !this.modelConfig.availableInputSizes.includes(inputSize)
+ ) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidArgument,
+ `Invalid inputSize: ${inputSize}. Available sizes: ${this.modelConfig.availableInputSizes.join(', ')}`
+ );
+ }
+
+ const methodName =
+ inputSize !== undefined ? `forward_${inputSize}` : 'forward';
+
+ const raw: Keypoint[][] =
+ typeof input === 'string'
+ ? await this.nativeModule.generateFromString(
+ input,
+ detectionThreshold,
+ keypointThreshold,
+ methodName
+ )
+ : await this.nativeModule.generateFromPixels(
+ input,
+ detectionThreshold,
+ keypointThreshold,
+ methodName
+ );
+
+ return mapPersonKeypoints>(
+ raw,
+ this.keypointEntries,
+ this.maxKeypointIndex
+ );
+ }
+}
diff --git a/packages/react-native-executorch/src/types/computerVision.ts b/packages/react-native-executorch/src/types/computerVision.ts
index a5d1dee7b2..da15999100 100644
--- a/packages/react-native-executorch/src/types/computerVision.ts
+++ b/packages/react-native-executorch/src/types/computerVision.ts
@@ -1,5 +1,18 @@
import { LabelEnum } from './common';
+/*
+ * Automatically resolves the type to either Configs[NameOrType][OutputKey], if the NameOrType
+ * is a key of Configs. Otherwise, returns NameOrType.
+ * @internal
+ */
+export type ResolveConfigOrType<
+ NameOrType,
+ Configs extends Record>,
+ OutputKey extends string = 'output',
+> = NameOrType extends keyof Configs
+ ? Configs[NameOrType][OutputKey]
+ : NameOrType;
+
/**
* Given a model configs record (mapping model names to `{ labelMap }`) and a
* type `T` (either a model name key or a raw {@link LabelEnum}), resolves to
@@ -7,10 +20,6 @@ import { LabelEnum } from './common';
* @internal
*/
export type ResolveLabels<
- T,
+ NameOrLabels,
Configs extends Record,
-> = T extends keyof Configs
- ? Configs[T]['labelMap']
- : T extends LabelEnum
- ? T
- : never;
+> = ResolveConfigOrType;
diff --git a/packages/react-native-executorch/src/types/poseEstimation.ts b/packages/react-native-executorch/src/types/poseEstimation.ts
new file mode 100644
index 0000000000..03afc592c3
--- /dev/null
+++ b/packages/react-native-executorch/src/types/poseEstimation.ts
@@ -0,0 +1,159 @@
+import { Frame, LabelEnum, PixelData, ResourceSource } from './common';
+import { CocoKeypoint } from '../constants/poseEstimation';
+import { RnExecutorchError } from '../errors/errorUtils';
+
+export { CocoKeypoint };
+
+/**
+ * A single keypoint with x, y coordinates
+ * @category Types
+ */
+export interface Keypoint {
+ x: number;
+ y: number;
+}
+
+/**
+ * Keypoints for a single detected person, keyed by name from the keypoint map.
+ * @typeParam K - The {@link LabelEnum} for this model.
+ * @category Types
+ * @example
+ * ```ts
+ * person.NOSE; // { x, y }
+ * ```
+ */
+export type PersonKeypoints = {
+ readonly [Name in keyof K]: Keypoint;
+};
+
+/**
+ * Pose estimation result containing all detected people.
+ * @category Types
+ */
+export type PoseDetections =
+ PersonKeypoints[];
+
+/**
+ * Configuration for pose estimation model behavior.
+ * @category Types
+ * @typeParam K - The keypoint enum type for this model.
+ */
+export type PoseEstimationConfig = {
+ keypointMap: K;
+ preprocessorConfig?: {
+ normMean?: readonly [number, number, number];
+ normStd?: readonly [number, number, number];
+ };
+ defaultDetectionThreshold?: number;
+ defaultKeypointThreshold?: number;
+} & (
+ | {
+ availableInputSizes: readonly number[];
+ defaultInputSize: number;
+ }
+ | {
+ availableInputSizes?: undefined;
+ defaultInputSize?: undefined;
+ }
+);
+
+/**
+ * Per-model config for {@link PoseEstimationModule.fromModelName}.
+ * Each model name maps to its required fields.
+ * @category Types
+ */
+export type PoseEstimationModelSources = {
+ modelName: 'yolo26n-pose';
+ modelSource: ResourceSource;
+};
+
+/**
+ * Union of all built-in pose estimation model names.
+ * @category Types
+ */
+export type PoseEstimationModelName = PoseEstimationModelSources['modelName'];
+
+/**
+ * Props for usePoseEstimation hook.
+ * @typeParam C - A {@link PoseEstimationModelSources} config specifying which built-in model to load.
+ * @category Types
+ */
+export interface PoseEstimationProps {
+ model: C;
+ preventLoad?: boolean;
+}
+
+/**
+ * Options for pose estimation inference
+ * @category Types
+ */
+export interface PoseEstimationOptions {
+ detectionThreshold?: number;
+ /**
+ * Per-keypoint visibility threshold (0-1). Keypoints whose visibility
+ * score is below this are emitted as (-1, -1) so consumers can skip them.
+ * Defaults to the model config's `defaultKeypointThreshold` (typically 0.5).
+ */
+ keypointThreshold?: number;
+ /**
+ * Input size for multi-method models.
+ * For YOLO models, valid values are typically 384, 512, or 640.
+ * Maps to forward_384, forward_512, forward_640 methods.
+ */
+ inputSize?: number;
+}
+
+/**
+ * Return type of usePoseEstimation hook.
+ * @typeParam K - The {@link LabelEnum} representing the model's keypoint schema.
+ * @category Types
+ */
+export interface PoseEstimationType {
+ /**
+ * Contains the error object if the model failed to load or encountered a runtime error.
+ */
+ error: RnExecutorchError | null;
+
+ /**
+ * Indicates whether the model is loaded and ready to process images.
+ */
+ isReady: boolean;
+
+ /**
+ * Indicates whether the model is currently processing an image.
+ */
+ isGenerating: boolean;
+
+ /**
+ * Represents the download progress of the model binary as a value between 0 and 1.
+ */
+ downloadProgress: number;
+
+ /**
+ * Run pose estimation on an image.
+ * @param input - Image path/URI or PixelData
+ * @param options - Detection options
+ * @returns Array of detected people, each with keypoints accessible via the keypoint enum
+ */
+ forward: (
+ input: string | PixelData,
+ options?: PoseEstimationOptions
+ ) => Promise>;
+
+ /**
+ * Returns the available input sizes for multi-method models.
+ * Returns undefined for single-method models.
+ */
+ getAvailableInputSizes: () => readonly number[] | undefined;
+
+ /**
+ * Synchronous worklet function for real-time VisionCamera frame processing.
+ */
+ runOnFrame:
+ | ((
+ frame: Frame,
+ isFrontCamera: boolean,
+ options?: PoseEstimationOptions
+ ) => PoseDetections)
+ | null;
+}
diff --git a/packages/react-native-executorch/src/utils/ResourceFetcherUtils.ts b/packages/react-native-executorch/src/utils/ResourceFetcherUtils.ts
index 9645afbaa9..46f4b34e2d 100644
--- a/packages/react-native-executorch/src/utils/ResourceFetcherUtils.ts
+++ b/packages/react-native-executorch/src/utils/ResourceFetcherUtils.ts
@@ -150,7 +150,7 @@ export namespace ResourceFetcherUtils {
/**
* Checks whether the given URL conforms to the huggingface.co/software-mansion schema.
* @param url - the URL to the remote file
- * @returns {boolean} Boolean specifying whether the given URL conforms to our HF repo schema
+ * @returns Boolean specifying whether the given URL conforms to our HF repo schema
*/
export function isUrlHfRepo(url: URL): boolean {
return (