From de080e0c712c28121ddedd91ca09ebac658d50e7 Mon Sep 17 00:00:00 2001
From: Brian Rosenberg
Date: Mon, 17 Mar 2025 10:38:42 -0400
Subject: [PATCH 1/3] Add Python 3.12 install instructions
---
docs/docs/Development-Environment-Guide.md | 11 +++-
.../Development-Environment-Guide/index.html | 12 +++-
docs/site/index.html | 2 +-
docs/site/search/search_index.json | 4 +-
docs/site/sitemap.xml | 62 +++++++++----------
5 files changed, 55 insertions(+), 36 deletions(-)
diff --git a/docs/docs/Development-Environment-Guide.md b/docs/docs/Development-Environment-Guide.md
index 5344eb176d6b..5fd8798bb3b3 100644
--- a/docs/docs/Development-Environment-Guide.md
+++ b/docs/docs/Development-Environment-Guide.md
@@ -46,7 +46,7 @@ end integration testing.
- Open a terminal and run `sudo apt update`
-- Run `sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl ansible`
+- Run `sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl`
- Run `sudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h`
@@ -194,6 +194,13 @@ cd;
sudo rm -rf /tmp/abseil /tmp/protobuf
```
+- Install Python 3.12:
+```bash
+sudo add-apt-repository -y ppa:deadsnakes/ppa
+sudo apt update
+sudo apt install python3.12-dev python3.12-venv
+```
+
- From your home directory run:
```bash
git clone https://github.com/openmpf/openmpf-projects.git --recursive;
@@ -216,6 +223,8 @@ git submodule foreach git checkout develop;
- Run `cd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties`
+- Run `pip3.8 install ansible`
+
- Run `sudo sh -c 'echo "[mpf-child]\nlocalhost" >> /etc/ansible/hosts'`
- Run `mkdir -p ~/.m2/repository/; tar -f /home/mpf/openmpf-projects/openmpf-build-tools/mpf-maven-deps.tar.gz --extract --gzip --directory ~/.m2/repository/`
diff --git a/docs/site/Development-Environment-Guide/index.html b/docs/site/Development-Environment-Guide/index.html
index 6bb54485b0d4..2e5710cbca8b 100644
--- a/docs/site/Development-Environment-Guide/index.html
+++ b/docs/site/Development-Environment-Guide/index.html
@@ -328,7 +328,7 @@ Setup VM
Open a terminal and run sudo apt update
-Run sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl ansible
+Run sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl
Run sudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h
@@ -488,6 +488,13 @@ Setup VM
sudo rm -rf /tmp/abseil /tmp/protobuf
+sudo add-apt-repository -y ppa:deadsnakes/ppa
+sudo apt update
+sudo apt install python3.12-dev python3.12-venv
+
+
- From your home directory run:
git clone https://github.com/openmpf/openmpf-projects.git --recursive;
@@ -518,6 +525,9 @@ Setup VM
Run cd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties
+Run pip3.8 install ansible
+
+
Run sudo sh -c 'echo "[mpf-child]\nlocalhost" >> /etc/ansible/hosts'
diff --git a/docs/site/index.html b/docs/site/index.html
index beb07abe30d9..0960e47e5ff6 100644
--- a/docs/site/index.html
+++ b/docs/site/index.html
@@ -404,5 +404,5 @@ Overview
diff --git a/docs/site/search/search_index.json b/docs/site/search/search_index.json
index 363657a36e3f..ab680462b4ef 100644
--- a/docs/site/search/search_index.json
+++ b/docs/site/search/search_index.json
@@ -1392,7 +1392,7 @@
},
{
"location": "/Development-Environment-Guide/index.html",
- "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\n\n \nWARNING:\n\n For most component developers, these steps are not necessary. Instead,\n refer to the\n \nC++\n,\n \nPython\n, or\n \nJava\n\n README for developing a Docker component in your desired language.\n\n\n\n\n\n \nWARNING:\n This guide is a work in progress and may not be completely\n accurate or comprehensive.\n\n\n\n\nOverview\n\n\nThe following instructions are for setting up an environment for building and\nrunning OpenMPF outside of Docker. They serve as a reference for developers who\nwant to develop the Workflow Manager web application itself and perform end-to-\nend integration testing.\n\n\nSetup VM\n\n\n\n\n\n\nDownload the ISO for the desktop version of Ubuntu 20.04 from\n \nhttps://releases.ubuntu.com/20.04\n.\n\n\n\n\n\n\nCreate an Ubuntu VM using the downloaded iso. This part is different based on\n what VM software you are using.\n\n\n\n\nUse mpf as your username.\n\n\nDuring the initial install, the VM window was small and didn't stretch to\n fill up the screen, but this may be fixed automatically after the installation\n finishes, or there may be additional steps necessary to install tools or\n configure settings based on your VM software.\n\n\n\n\n\n\n\n\nAfter completing the installation, you will likely be prompted to update\n software. You should install the updates.\n\n\n\n\n\n\nOptionally, shutdown the VM and take a snapshot. 
This will enable you to revert back\n to a clean Ubuntu install in case anything goes wrong.\n\n\n\n\n\n\nOpen a terminal and run \nsudo apt update\n\n\n\n\n\n\nRun \nsudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl ansible\n\n\n\n\n\n\nRun \nsudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h\n\n\n\n\n\n\nRun \nsudo ln --symbolic /usr/bin/cmake /usr/bin/cmake3\n\n\n\n\n\n\nFollow instructions to install Docker:\n \nhttps://docs.docker.com/engine/install/ubuntu/#install-using-the-repository\n\n\n\n\n\n\nOptionally, configure Docker to use socket activation. The advantage of socket activation is\n that systemd will automatically start the Docker daemon when you use \ndocker\n commands:\n\n\n\n\n\n\nsudo systemctl disable docker.service;\nsudo systemctl stop docker.service;\nsudo systemctl enable docker.socket;\n\n\n\n\n\n\n\nFollow instructions so that you can run Docker without sudo:\n \nhttps://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user\n\n\n\n\n\n\nInstall Docker Compose:\n\n\n\n\n\n\nsudo apt update\nsudo apt install docker-compose-plugin\n\n\n\n\n\n\n\nOptionally, stop redis from starting automatically:\n \nsudo systemctl disable redis\n\n\n\n\n\n\nOptionally, stop postgresql from starting automatically:\n \nsudo systemctl disable postgresql\n\n\n\n\n\n\nInitialize Postgres (use \"password\" when prompted for a password):\n\n\n\n\n\n\nsudo -i -u postgres createuser -P mpf\nsudo -i -u postgres createdb -O mpf mpf\n\n\n\n\n\nBuild and install OpenCV:\n\n\n\n\nmkdir /tmp/opencv-contrib;\nwget -O- 
'https://github.com/opencv/opencv_contrib/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/opencv-contrib;\nmkdir /tmp/opencv;\ncd /tmp/opencv;\nwget -O- 'https://github.com/opencv/opencv/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip;\ncd opencv-4.9.0;\nmkdir build;\ncd build;\nexport OpenBLAS_HOME=/usr/lib/x86_64-linux-gnu/openblas-pthread; \\\ncmake -DCMAKE_INSTALL_PREFIX:PATH='/opt/opencv-4.9.0' \\\n -DWITH_IPP=false \\\n -DBUILD_EXAMPLES=false \\\n -DBUILD_TESTS=false \\\n -DBUILD_PERF_TESTS=false \\\n -DOPENCV_EXTRA_MODULES_PATH=/tmp/opencv-contrib/opencv_contrib-4.9.0/modules \\\n ..;\nsudo make --jobs \"$(nproc)\" install;\nsudo ln --symbolic '/opt/opencv-4.9.0/include/opencv4/opencv2' /usr/local/include/opencv2;\nsudo sh -c 'echo /opt/opencv-4.9.0/lib > /etc/ld.so.conf.d/mpf.conf'\nsudo ldconfig;\nsudo rm -rf /tmp/opencv-contrib /tmp/opencv;\n\n\n\n\n\nBuild and install the ActiveMQ C++ library:\n\n\n\n\nmkdir /tmp/activemq-cpp;\ncd /tmp/activemq-cpp;\nwget -O- https://dlcdn.apache.org/activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.gz \\\n | tar --extract --gzip;\ncd activemq-cpp-library-3.9.5;\n./configure;\nsudo make --jobs \"$(nproc)\" install;\nsudo rm -rf /tmp/activemq-cpp;\n\n\n\n\n\nInstall NotoEmoji font for markup:\n\n\n\n\nmkdir /tmp/noto;\ncd /tmp/noto;\nwget https://noto-website-2.storage.googleapis.com/pkgs/NotoEmoji-unhinted.zip;\nunzip NotoEmoji-unhinted.zip;\nsudo mkdir --parents /usr/share/fonts/google-noto-emoji;\nsudo cp NotoEmoji-Regular.ttf /usr/share/fonts/google-noto-emoji/;\nsudo chmod a+r /usr/share/fonts/google-noto-emoji/NotoEmoji-Regular.ttf;\nrm -rf /tmp/noto;\n\n\n\n\n\nBuild and install PNG Defry:\n\n\n\n\nmkdir /tmp/pngdefry;\ncd /tmp/pngdefry;\nwget -O- 'https://github.com/openmpf/pngdefry/archive/v1.2.tar.gz' \\\n | tar --extract --gzip;\ncd pngdefry-1.2;\nsudo gcc pngdefry.c -o /usr/local/bin/pngdefry;\nrm -rf /tmp/pngdefry;\n\n\n\n\n\nInstall Maven:\n\n\n\n\nwget -O- 
'https://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz' \\\n | sudo tar --extract --gzip --directory /opt;\nsudo ln --symbolic /opt/apache-maven-3.3.3/bin/mvn /usr/local/bin;\n\n\n\n\n\nBuild and install libheif:\n\n\n\n\nmkdir /tmp/libheif;\ncd /tmp/libheif;\nwget -O- https://github.com/strukturag/libheif/archive/refs/tags/v1.12.0.tar.gz \\\n | tar --extract --gzip;\ncd libheif-1.12.0;\nmkdir build;\ncd build;\ncmake3 -DCMAKE_INSTALL_PREFIX=/usr -DWITH_EXAMPLES=false ..;\nsudo make --jobs \"$(nproc)\" install;\ncd;\nsudo rm -rf /tmp/libheif;\n\n\n\n\n\nBuild and install Protocol Buffers:\n\n\n\n\nmkdir /tmp/abseil;\nwget -O- 'https://github.com/abseil/abseil-cpp/releases/download/20240722.0/abseil-cpp-20240722.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/abseil;\n\nmkdir /tmp/protobuf;\ncd /tmp/protobuf;\nwget -O- 'https://github.com/protocolbuffers/protobuf/releases/download/v28.3/protobuf-28.3.tar.gz' \\\n | tar --extract --gzip;\n\ncd protobuf-28.3;\nmkdir build;\ncd build;\ncmake \\\n -DCMAKE_CXX_STANDARD=17 \\\n -DCMAKE_POSITION_INDEPENDENT_CODE=ON \\\n -DABSL_ROOT_DIR=/tmp/abseil/abseil-cpp-20240722.0 \\\n -Dprotobuf_BUILD_TESTS=OFF \\\n ..;\ncmake --build . --parallel \"$(nproc)\";\nsudo cmake --install . 
--strip;\ncd;\nsudo rm -rf /tmp/abseil /tmp/protobuf\n\n\n\n\n\nFrom your home directory run:\n\n\n\n\ngit clone https://github.com/openmpf/openmpf-projects.git --recursive;\ncd openmpf-projects;\ngit checkout develop;\ngit submodule foreach git checkout develop;\n\n\n\n\n\n\n\nRun: \npip install openmpf-projects/openmpf/trunk/bin/mpf-scripts\n\n\n\n\n\n\nAdd \nPATH=\"$HOME/.local/bin:$PATH\"\n to \n~/.bashrc\n\n\n\n\n\n\nRun \nmkdir -p openmpf-projects/openmpf/trunk/install/share/logs\n\n\n\n\n\n\nRun \nsudo cp openmpf-projects/openmpf/trunk/mpf-install/src/main/scripts/mpf-profile.sh /etc/profile.d/mpf.sh\n\n\n\n\n\n\nRun \nsudo sh -c 'echo /home/mpf/mpf-sdk-install/lib >> /etc/ld.so.conf.d/mpf.conf'\n\n\n\n\n\n\nRun \nsudo cp openmpf-projects/openmpf/trunk/node-manager/src/scripts/node-manager.service /etc/systemd/system/node-manager.service\n\n\n\n\n\n\nRun \ncd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties\n\n\n\n\n\n\nRun \nsudo sh -c 'echo \"[mpf-child]\\nlocalhost\" >> /etc/ansible/hosts'\n\n\n\n\n\n\nRun \nmkdir -p ~/.m2/repository/; tar -f /home/mpf/openmpf-projects/openmpf-build-tools/mpf-maven-deps.tar.gz --extract --gzip --directory ~/.m2/repository/\n\n\n\n\n\n\nReboot the VM.\n\n\n\n\n\n\nAt this point you may wish to install additional dependencies so that you can\nbuild specific OpenMPF components. Refer to the commands in the \nDockerfile\n\nfor each component you're interested in.\n\n\nConfigure Users\n\n\nTo change the default user password settings, modify\n\nopenmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/user.properties\n.\nNote that the default settings are public knowledge, which could be a security\nrisk.\n\n\nNote that \nmpf remove-user\n and \nmpf add-user\n commands explained in the\n\nCommand Line Tools\n section do not modify the\n\nuser.properties\n file. 
If you remove a user using the \nmpf remove-user\n\ncommand, the changes will take effect at runtime, but an entry may still exist\nfor that user in the \nuser.properties\n file. If so, then the user account will\nbe recreated the next time the Workflow Manager is restarted.\n\n\nBuild and Run the OpenMPF Workflow Manager Web Application\n\n\n\n\nBuild OpenMPF:\n\n\n\n\ncd ~/openmpf-projects/openmpf;\nmvn clean install \\\n -DskipTests -Dmaven.test.skip=true \\\n -DskipITs \\\n -Dcomponents.build.components=openmpf-components/cpp/OcvFaceDetection \\\n -Dstartup.auto.registration.skip=false;\n\n\n\n\n\nStart OpenMPF with \nmpf start\n.\n\n\n\n\nLook for this log message in the terminal with a time value indicating the Workflow Manager has\nfinished starting:\n\n\n2022-10-11 12:21:16,447 INFO [main] o.m.m.Application - Started Application in 22.843 seconds (JVM running for 24.661)\n\n\n\nAfter startup, the Workflow Manager will be available at \nhttp://localhost:8080\n.\nBrowse to this URL using Firefox or Chrome.\n\n\nIf you want to test regular user capabilities, log in as the \"mpf\" user with\nthe \"mpf123\" password. Please see the\n\nOpenMPF User Guide\n for more information.\nAlternatively, if you want to test admin capabilities then log in as \"admin\"\nuser with the \"mpfadm\" password. Please see the\n\nOpenMPF Admin Guide\n for more information.\nWhen finished using OpenMPF, stop Workflow Manager with \nctrl-c\n and then run \nmpf stop\n to stop\nthe other system dependencies.\n\n\nThe preferred method to start and stop services for OpenMPF is with the\n\nmpf start\n and \nmpf stop\n commands. For additional information on these\ncommands, please see the\n\nCommand Line Tools\n section.\nThese will start and stop the PostgreSQL, Redis, Node Manager, and Workflow Manager processes.\n\n\nKnown Issues\n\n\no.m.m.m.c.JobController - Failure creating job. 
supplier.get()\n\n\nIf you see an error message similar to:\n\n\n2022-02-07 17:17:30,538 ERROR [http-nio-8080-exec-1] o.m.m.m.c.JobController - Failure creating job. supplier.get()\njava.lang.NullPointerException: supplier.get()\n at java.util.Objects.requireNonNull(Objects.java:246) ~[?:?]\n at java.util.Objects.requireNonNullElseGet(Objects.java:321) ~[?:?]\n at org.mitre.mpf.wfm.util.PropertiesUtil.getHostName(PropertiesUtil.java:267) ~[classes/:?]\n at org.mitre.mpf.wfm.util.PropertiesUtil.getExportedJobId(PropertiesUtil.java:285) ~[classes/:?]\n\n\n\nOpen \n/etc/profile.d/mpf.sh\n and change \nexport HOSTNAME\n to\n\nexport HOSTNAME=$(hostname)\n. Then, restart the VM.\n\n\nAppendices\n\n\nCommand Line Tools\n\n\nOpenMPF installs command line tools that can be accessed through a terminal\non the development machine. All of the tools take the form of actions:\n\nmpf [options ...]\n.\n\n\nExecute \nmpf --help\n for general documentation and \nmpf --help\n for\ndocumentation about a specific action.\n\n\n\n\nStart / Stop Actions\n: Actions for starting and stopping the OpenMPF\n system dependencies, including PostgreSQL, Redis, Workflow Manager, and the\n node managers on the various nodes in the OpenMPF cluster.\n\n\nmpf status\n: displays a message indicating whether each of the system\n dependencies is running or not\n\n\nmpf start\n: starts all of the system dependencies\n\n\nmpf stop\n: stops all of the system dependencies\n\n\nmpf restart\n : stops and then starts all of the system dependencies\n\n\n\n\n\n\nUser Actions\n: Actions for managing Workflow Manager user accounts. 
If\n changes are made to an existing user then that user will need to log off or\n the Workflow Manager will need to be restarted for the changes to take effect.\n\n\nmpf list-users\n : lists all of the existing user accounts and their role\n (non-admin or admin)\n\n\nmpf add-user \n: adds a new user account; will be\n prompted to enter the account password\n\n\nmpf remove-user \n : removes an existing user account\n\n\nmpf change-role \n : change the role (non-admin to admin\n or vice versa) for an existing user\n\n\nmpf change-password \n: change the password for an existing\n user; will be prompted to enter the new account password\n\n\n\n\n\n\nClean Actions\n: Actions to remove old data and revert the system to a\n new install state. User accounts, registered components, as well as custom\n actions, tasks, and pipelines, are preserved.\n\n\nmpf clean\n: cleans out old job information and results, pending job requests, and marked up\n media files, but preserves log files and uploaded media.\n\n\nmpf clean --delete-logs --delete-uploaded-media\n: the same as \nmpf clean\n\n but also deletes log files and uploaded media\n\n\n\n\n\n\nNode Action\n: Actions for managing node membership in the OpenMPF cluster.\n\n\nmpf list-nodes\n: If the Workflow Manager is running, get the current\n JGroups view; otherwise, list the core nodes\n\n\n\n\n\n\n\n\nPackaging a Component\n\n\nIn a non-Docker deployment, admin users can register component packages through\nthe web UI. 
Refer to \nComponent Registration\n.\n\n\nOnce the descriptor file is complete, as described in\n\nComponent Descriptor Reference\n,\nthe next step is to compile your component source code, and finally, create a\n.tar.gz package containing the descriptor file, component library, and all\nother necessary files.\n\n\nThe package should contain a top-level directory with a unique name that will\nnot conflict with existing component packages that have already been developed.\nThe top-level directory name should be the same as the \ncomponentName\n.\n\n\nWithin the top-level directory there must be a directory named \u201cdescriptor\u201d\nwith the descriptor JSON file in it. The name of the file must be\n\u201cdescriptor.json\u201d.\n\n\nExample:\n\n\n//sample-component-1.0.0-tar.gz contents\nSampleComponent/\n config/\n descriptor/\n descriptor.json\n lib/\n\n\n\nInstalling and registering a component\n\n\nThe Component Registration web page, located in the Admin section of the\nOpenMPF web user interface, can be used to upload and register the component.\n\n\nDrag and drop the .tar.gz file containing the component onto the dropzone area\nof that page. The component will automatically be uploaded and registered.\n\n\nUpon successful registration, the component will be available for deployment\nonto OpenMPF nodes via the Node Configuration web page and\n\n/rest/nodes/config\n end point.\n\n\nIf the descriptor contains custom actions, tasks, or pipelines, then they will\nbe automatically added to the system upon registration.\n\n\n\n\nNOTE:\n If the descriptor does not contain custom actions, tasks,\nor pipelines, then a default action, task, and pipeline will be generated\nand added to the system.\n\n\nThe default action will use the component\u2019s algorithm with its default\nproperty value settings.\nThe default task will use the default action.\nThe default pipeline will use the default task. 
This will only be generated\nif the algorithm does not specify any \nrequiresCollection\n states.\n\n\n\n\nUnregistering a component\n\n\nA component can be unregistered by using the remove button on the Component\nRegistration page.\n\n\nDuring unregistration, all services, algorithms, actions, tasks, and pipelines\nassociated with the component are deleted. Additionally, all actions, tasks,\nand pipelines that depend on these elements are removed.\n\n\nWeb UI\n\n\nThe following sections will cover some additional functionality permitted to\nadmin users in a non-Docker deployment.\n\n\nNode Configuration and Status\n\n\nThis page provides a list of all of the services that are configured to run on\nthe OpenMPF cluster:\n\n\n\n\nEach node shows information about the current status of each service, if it is\nunlaunchable due to an underlying error, and how many services are running for\neach node. If a service is unlaunchable, it will be indicated using a red\nstatus icon (not shown). Note that services are grouped by component type.\nClick the chevron \">\" to expand a service group to view the individual services.\n\n\nAn admin user can start, stop, or restart them on an individual basis. If a\nnon-admin user views this page, the \"Action(s)\" column is not displayed. This\npage also enables an admin user to edit the configuration for all nodes in the\nOpenMPF cluster. A non-admin user can only view the existing configuration.\n\n\nAn admin user can add a node by using the \"Add Node\" button and selecting a\nnode in the OpenMPF cluster from the drop-down list. You can also select to add\nall services at this time. A node and all if its configured services can be\nremoved by clicking the trash can to the right of the node's hostname.\n\n\nAn admin user can add services individually by selecting the node edit button\nat the bottom of the node. The number of service instances can be increased or\ndecreased by using the drop-down. 
Click the \"Submit\" button to save the changes.\n\n\nWhen making changes, please be aware of the following:\n\n\n\n\nIt may take a minute for the configuration to take effect on the server.\n\n\nIf you remove an existing service from a node, any job that service is\n processing will be stopped, and you will need to resubmit that job.\n\n\nIf you create a new node, its configuration will not take effect until the\n OpenMPF software is properly installed and started on the associated host.\n\n\nIf you delete a node, you will need to manually turn off the hardware running\n that node (deleting a node does not shut down the machine).\n\n\n\n\nComponent Registration\n\n\nThis page allows an admin user to add and remove non-default components to and\nfrom the system:\n\n\n\n\nA component package takes the form of a tar.gz file. An admin user can either\ndrag and drop the file onto the \"Upload a new component\" dropzone area or click\nthe dropzone area to open a file browser and select the file that way.\nIn either case, the component will begin to be uploaded to the system. If the\nadmin user dragged and dropped the file onto the dropzone area then the upload\nprogress will be shown in that area. Once uploaded, the Workflow Manager will\nautomatically attempt to register the component. Notification messages will\nappear in the upper right side of the screen to indicate success or failure if\nan error occurs. The \"Current Components\" table will display the component\nstatus.\n\n\n\n\nIf for some reason the component package upload succeeded but the component\nregistration failed then the admin user will be able to click the \"Register\"\nbutton again to try to another registration attempt. For example, the admin\nuser may do this after reviewing the Workflow Manager logs and resolving any\nissues that prevented the component from successfully registering the first\ntime. One reason may be that a component with the same name already exists on\nthe system. 
Note that an error will also occur if the top-level directory of\nthe component package, once extracted, already exists in the \n/opt/mpf/plugins\n\ndirectory on the system.\n\n\nOnce registered, an admin user has the option to remove the component. This\nwill unregister it and completely remove any configured services, as well as\nthe uploaded file and its extracted contents, from the system. Also, the\ncomponent algorithm as well as any actions, tasks, and pipelines specified in\nthe component's descriptor file will be removed when the component is removed.",
+ "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\n\n \nWARNING:\n\n For most component developers, these steps are not necessary. Instead,\n refer to the\n \nC++\n,\n \nPython\n, or\n \nJava\n\n README for developing a Docker component in your desired language.\n\n\n\n\n\n \nWARNING:\n This guide is a work in progress and may not be completely\n accurate or comprehensive.\n\n\n\n\nOverview\n\n\nThe following instructions are for setting up an environment for building and\nrunning OpenMPF outside of Docker. They serve as a reference for developers who\nwant to develop the Workflow Manager web application itself and perform end-to-\nend integration testing.\n\n\nSetup VM\n\n\n\n\n\n\nDownload the ISO for the desktop version of Ubuntu 20.04 from\n \nhttps://releases.ubuntu.com/20.04\n.\n\n\n\n\n\n\nCreate an Ubuntu VM using the downloaded iso. This part is different based on\n what VM software you are using.\n\n\n\n\nUse mpf as your username.\n\n\nDuring the initial install, the VM window was small and didn't stretch to\n fill up the screen, but this may be fixed automatically after the installation\n finishes, or there may be additional steps necessary to install tools or\n configure settings based on your VM software.\n\n\n\n\n\n\n\n\nAfter completing the installation, you will likely be prompted to update\n software. You should install the updates.\n\n\n\n\n\n\nOptionally, shutdown the VM and take a snapshot. 
This will enable you to revert back\n to a clean Ubuntu install in case anything goes wrong.\n\n\n\n\n\n\nOpen a terminal and run \nsudo apt update\n\n\n\n\n\n\nRun \nsudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl\n\n\n\n\n\n\nRun \nsudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h\n\n\n\n\n\n\nRun \nsudo ln --symbolic /usr/bin/cmake /usr/bin/cmake3\n\n\n\n\n\n\nFollow instructions to install Docker:\n \nhttps://docs.docker.com/engine/install/ubuntu/#install-using-the-repository\n\n\n\n\n\n\nOptionally, configure Docker to use socket activation. The advantage of socket activation is\n that systemd will automatically start the Docker daemon when you use \ndocker\n commands:\n\n\n\n\n\n\nsudo systemctl disable docker.service;\nsudo systemctl stop docker.service;\nsudo systemctl enable docker.socket;\n\n\n\n\n\n\n\nFollow instructions so that you can run Docker without sudo:\n \nhttps://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user\n\n\n\n\n\n\nInstall Docker Compose:\n\n\n\n\n\n\nsudo apt update\nsudo apt install docker-compose-plugin\n\n\n\n\n\n\n\nOptionally, stop redis from starting automatically:\n \nsudo systemctl disable redis\n\n\n\n\n\n\nOptionally, stop postgresql from starting automatically:\n \nsudo systemctl disable postgresql\n\n\n\n\n\n\nInitialize Postgres (use \"password\" when prompted for a password):\n\n\n\n\n\n\nsudo -i -u postgres createuser -P mpf\nsudo -i -u postgres createdb -O mpf mpf\n\n\n\n\n\nBuild and install OpenCV:\n\n\n\n\nmkdir /tmp/opencv-contrib;\nwget -O- 
'https://github.com/opencv/opencv_contrib/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/opencv-contrib;\nmkdir /tmp/opencv;\ncd /tmp/opencv;\nwget -O- 'https://github.com/opencv/opencv/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip;\ncd opencv-4.9.0;\nmkdir build;\ncd build;\nexport OpenBLAS_HOME=/usr/lib/x86_64-linux-gnu/openblas-pthread; \\\ncmake -DCMAKE_INSTALL_PREFIX:PATH='/opt/opencv-4.9.0' \\\n -DWITH_IPP=false \\\n -DBUILD_EXAMPLES=false \\\n -DBUILD_TESTS=false \\\n -DBUILD_PERF_TESTS=false \\\n -DOPENCV_EXTRA_MODULES_PATH=/tmp/opencv-contrib/opencv_contrib-4.9.0/modules \\\n ..;\nsudo make --jobs \"$(nproc)\" install;\nsudo ln --symbolic '/opt/opencv-4.9.0/include/opencv4/opencv2' /usr/local/include/opencv2;\nsudo sh -c 'echo /opt/opencv-4.9.0/lib > /etc/ld.so.conf.d/mpf.conf'\nsudo ldconfig;\nsudo rm -rf /tmp/opencv-contrib /tmp/opencv;\n\n\n\n\n\nBuild and install the ActiveMQ C++ library:\n\n\n\n\nmkdir /tmp/activemq-cpp;\ncd /tmp/activemq-cpp;\nwget -O- https://dlcdn.apache.org/activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.gz \\\n | tar --extract --gzip;\ncd activemq-cpp-library-3.9.5;\n./configure;\nsudo make --jobs \"$(nproc)\" install;\nsudo rm -rf /tmp/activemq-cpp;\n\n\n\n\n\nInstall NotoEmoji font for markup:\n\n\n\n\nmkdir /tmp/noto;\ncd /tmp/noto;\nwget https://noto-website-2.storage.googleapis.com/pkgs/NotoEmoji-unhinted.zip;\nunzip NotoEmoji-unhinted.zip;\nsudo mkdir --parents /usr/share/fonts/google-noto-emoji;\nsudo cp NotoEmoji-Regular.ttf /usr/share/fonts/google-noto-emoji/;\nsudo chmod a+r /usr/share/fonts/google-noto-emoji/NotoEmoji-Regular.ttf;\nrm -rf /tmp/noto;\n\n\n\n\n\nBuild and install PNG Defry:\n\n\n\n\nmkdir /tmp/pngdefry;\ncd /tmp/pngdefry;\nwget -O- 'https://github.com/openmpf/pngdefry/archive/v1.2.tar.gz' \\\n | tar --extract --gzip;\ncd pngdefry-1.2;\nsudo gcc pngdefry.c -o /usr/local/bin/pngdefry;\nrm -rf /tmp/pngdefry;\n\n\n\n\n\nInstall Maven:\n\n\n\n\nwget -O- 
'https://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz' \\\n | sudo tar --extract --gzip --directory /opt;\nsudo ln --symbolic /opt/apache-maven-3.3.3/bin/mvn /usr/local/bin;\n\n\n\n\n\nBuild and install libheif:\n\n\n\n\nmkdir /tmp/libheif;\ncd /tmp/libheif;\nwget -O- https://github.com/strukturag/libheif/archive/refs/tags/v1.12.0.tar.gz \\\n | tar --extract --gzip;\ncd libheif-1.12.0;\nmkdir build;\ncd build;\ncmake3 -DCMAKE_INSTALL_PREFIX=/usr -DWITH_EXAMPLES=false ..;\nsudo make --jobs \"$(nproc)\" install;\ncd;\nsudo rm -rf /tmp/libheif;\n\n\n\n\n\nBuild and install Protocol Buffers:\n\n\n\n\nmkdir /tmp/abseil;\nwget -O- 'https://github.com/abseil/abseil-cpp/releases/download/20240722.0/abseil-cpp-20240722.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/abseil;\n\nmkdir /tmp/protobuf;\ncd /tmp/protobuf;\nwget -O- 'https://github.com/protocolbuffers/protobuf/releases/download/v28.3/protobuf-28.3.tar.gz' \\\n | tar --extract --gzip;\n\ncd protobuf-28.3;\nmkdir build;\ncd build;\ncmake \\\n -DCMAKE_CXX_STANDARD=17 \\\n -DCMAKE_POSITION_INDEPENDENT_CODE=ON \\\n -DABSL_ROOT_DIR=/tmp/abseil/abseil-cpp-20240722.0 \\\n -Dprotobuf_BUILD_TESTS=OFF \\\n ..;\ncmake --build . --parallel \"$(nproc)\";\nsudo cmake --install . 
--strip;\ncd;\nsudo rm -rf /tmp/abseil /tmp/protobuf\n\n\n\n\n\nInstall Python3.12:\n\n\n\n\nsudo add-apt-repository -y ppa:deadsnakes/ppa\nsudo apt update\nsudo apt install python3.12-dev python3.12-venv\n\n\n\n\n\nFrom your home directory run:\n\n\n\n\ngit clone https://github.com/openmpf/openmpf-projects.git --recursive;\ncd openmpf-projects;\ngit checkout develop;\ngit submodule foreach git checkout develop;\n\n\n\n\n\n\n\nRun: \npip install openmpf-projects/openmpf/trunk/bin/mpf-scripts\n\n\n\n\n\n\nAdd \nPATH=\"$HOME/.local/bin:$PATH\"\n to \n~/.bashrc\n\n\n\n\n\n\nRun \nmkdir -p openmpf-projects/openmpf/trunk/install/share/logs\n\n\n\n\n\n\nRun \nsudo cp openmpf-projects/openmpf/trunk/mpf-install/src/main/scripts/mpf-profile.sh /etc/profile.d/mpf.sh\n\n\n\n\n\n\nRun \nsudo sh -c 'echo /home/mpf/mpf-sdk-install/lib >> /etc/ld.so.conf.d/mpf.conf'\n\n\n\n\n\n\nRun \nsudo cp openmpf-projects/openmpf/trunk/node-manager/src/scripts/node-manager.service /etc/systemd/system/node-manager.service\n\n\n\n\n\n\nRun \ncd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties\n\n\n\n\n\n\nRun \npip3.8 install ansible\n\n\n\n\n\n\nRun \nsudo sh -c 'echo \"[mpf-child]\\nlocalhost\" >> /etc/ansible/hosts'\n\n\n\n\n\n\nRun \nmkdir -p ~/.m2/repository/; tar -f /home/mpf/openmpf-projects/openmpf-build-tools/mpf-maven-deps.tar.gz --extract --gzip --directory ~/.m2/repository/\n\n\n\n\n\n\nReboot the VM.\n\n\n\n\n\n\nAt this point you may wish to install additional dependencies so that you can\nbuild specific OpenMPF components. 
Refer to the commands in the \nDockerfile\n\nfor each component you're interested in.\n\n\nConfigure Users\n\n\nTo change the default user password settings, modify\n\nopenmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/user.properties\n.\nNote that the default settings are public knowledge, which could be a security\nrisk.\n\n\nNote that \nmpf remove-user\n and \nmpf add-user\n commands explained in the\n\nCommand Line Tools\n section do not modify the\n\nuser.properties\n file. If you remove a user using the \nmpf remove-user\n\ncommand, the changes will take effect at runtime, but an entry may still exist\nfor that user in the \nuser.properties\n file. If so, then the user account will\nbe recreated the next time the Workflow Manager is restarted.\n\n\nBuild and Run the OpenMPF Workflow Manager Web Application\n\n\n\n\nBuild OpenMPF:\n\n\n\n\ncd ~/openmpf-projects/openmpf;\nmvn clean install \\\n -DskipTests -Dmaven.test.skip=true \\\n -DskipITs \\\n -Dcomponents.build.components=openmpf-components/cpp/OcvFaceDetection \\\n -Dstartup.auto.registration.skip=false;\n\n\n\n\n\nStart OpenMPF with \nmpf start\n.\n\n\n\n\nLook for this log message in the terminal with a time value indicating the Workflow Manager has\nfinished starting:\n\n\n2022-10-11 12:21:16,447 INFO [main] o.m.m.Application - Started Application in 22.843 seconds (JVM running for 24.661)\n\n\n\nAfter startup, the Workflow Manager will be available at \nhttp://localhost:8080\n.\nBrowse to this URL using Firefox or Chrome.\n\n\nIf you want to test regular user capabilities, log in as the \"mpf\" user with\nthe \"mpf123\" password. Please see the\n\nOpenMPF User Guide\n for more information.\nAlternatively, if you want to test admin capabilities then log in as \"admin\"\nuser with the \"mpfadm\" password. 
Please see the\n\nOpenMPF Admin Guide\n for more information.\nWhen finished using OpenMPF, stop Workflow Manager with \nctrl-c\n and then run \nmpf stop\n to stop\nthe other system dependencies.\n\n\nThe preferred method to start and stop services for OpenMPF is with the\n\nmpf start\n and \nmpf stop\n commands. For additional information on these\ncommands, please see the\n\nCommand Line Tools\n section.\nThese will start and stop the PostgreSQL, Redis, Node Manager, and Workflow Manager processes.\n\n\nKnown Issues\n\n\no.m.m.m.c.JobController - Failure creating job. supplier.get()\n\n\nIf you see an error message similar to:\n\n\n2022-02-07 17:17:30,538 ERROR [http-nio-8080-exec-1] o.m.m.m.c.JobController - Failure creating job. supplier.get()\njava.lang.NullPointerException: supplier.get()\n at java.util.Objects.requireNonNull(Objects.java:246) ~[?:?]\n at java.util.Objects.requireNonNullElseGet(Objects.java:321) ~[?:?]\n at org.mitre.mpf.wfm.util.PropertiesUtil.getHostName(PropertiesUtil.java:267) ~[classes/:?]\n at org.mitre.mpf.wfm.util.PropertiesUtil.getExportedJobId(PropertiesUtil.java:285) ~[classes/:?]\n\n\n\nOpen \n/etc/profile.d/mpf.sh\n and change \nexport HOSTNAME\n to\n\nexport HOSTNAME=$(hostname)\n. Then, restart the VM.\n\n\nAppendices\n\n\nCommand Line Tools\n\n\nOpenMPF installs command line tools that can be accessed through a terminal\non the development machine. 
All of the tools take the form of actions:\n\nmpf [options ...]\n.\n\n\nExecute \nmpf --help\n for general documentation and \nmpf --help\n for\ndocumentation about a specific action.\n\n\n\n\nStart / Stop Actions\n: Actions for starting and stopping the OpenMPF\n system dependencies, including PostgreSQL, Redis, Workflow Manager, and the\n node managers on the various nodes in the OpenMPF cluster.\n\n\nmpf status\n: displays a message indicating whether each of the system\n dependencies is running or not\n\n\nmpf start\n: starts all of the system dependencies\n\n\nmpf stop\n: stops all of the system dependencies\n\n\nmpf restart\n : stops and then starts all of the system dependencies\n\n\n\n\n\n\nUser Actions\n: Actions for managing Workflow Manager user accounts. If\n changes are made to an existing user then that user will need to log off or\n the Workflow Manager will need to be restarted for the changes to take effect.\n\n\nmpf list-users\n : lists all of the existing user accounts and their role\n (non-admin or admin)\n\n\nmpf add-user \n: adds a new user account; will be\n prompted to enter the account password\n\n\nmpf remove-user \n : removes an existing user account\n\n\nmpf change-role \n : change the role (non-admin to admin\n or vice versa) for an existing user\n\n\nmpf change-password \n: change the password for an existing\n user; will be prompted to enter the new account password\n\n\n\n\n\n\nClean Actions\n: Actions to remove old data and revert the system to a\n new install state. 
User accounts, registered components, as well as custom\n actions, tasks, and pipelines, are preserved.\n\n\nmpf clean\n: cleans out old job information and results, pending job requests, and marked up\n media files, but preserves log files and uploaded media.\n\n\nmpf clean --delete-logs --delete-uploaded-media\n: the same as \nmpf clean\n\n but also deletes log files and uploaded media\n\n\n\n\n\n\nNode Action\n: Actions for managing node membership in the OpenMPF cluster.\n\n\nmpf list-nodes\n: If the Workflow Manager is running, get the current\n JGroups view; otherwise, list the core nodes\n\n\n\n\n\n\n\n\nPackaging a Component\n\n\nIn a non-Docker deployment, admin users can register component packages through\nthe web UI. Refer to \nComponent Registration\n.\n\n\nOnce the descriptor file is complete, as described in\n\nComponent Descriptor Reference\n,\nthe next step is to compile your component source code, and finally, create a\n.tar.gz package containing the descriptor file, component library, and all\nother necessary files.\n\n\nThe package should contain a top-level directory with a unique name that will\nnot conflict with existing component packages that have already been developed.\nThe top-level directory name should be the same as the \ncomponentName\n.\n\n\nWithin the top-level directory there must be a directory named \u201cdescriptor\u201d\nwith the descriptor JSON file in it. The name of the file must be\n\u201cdescriptor.json\u201d.\n\n\nExample:\n\n\n//sample-component-1.0.0-tar.gz contents\nSampleComponent/\n config/\n descriptor/\n descriptor.json\n lib/\n\n\n\nInstalling and registering a component\n\n\nThe Component Registration web page, located in the Admin section of the\nOpenMPF web user interface, can be used to upload and register the component.\n\n\nDrag and drop the .tar.gz file containing the component onto the dropzone area\nof that page. 
The component will automatically be uploaded and registered.\n\n\nUpon successful registration, the component will be available for deployment\nonto OpenMPF nodes via the Node Configuration web page and\n\n/rest/nodes/config\n end point.\n\n\nIf the descriptor contains custom actions, tasks, or pipelines, then they will\nbe automatically added to the system upon registration.\n\n\n\n\nNOTE:\n If the descriptor does not contain custom actions, tasks,\nor pipelines, then a default action, task, and pipeline will be generated\nand added to the system.\n\n\nThe default action will use the component\u2019s algorithm with its default\nproperty value settings.\nThe default task will use the default action.\nThe default pipeline will use the default task. This will only be generated\nif the algorithm does not specify any \nrequiresCollection\n states.\n\n\n\n\nUnregistering a component\n\n\nA component can be unregistered by using the remove button on the Component\nRegistration page.\n\n\nDuring unregistration, all services, algorithms, actions, tasks, and pipelines\nassociated with the component are deleted. Additionally, all actions, tasks,\nand pipelines that depend on these elements are removed.\n\n\nWeb UI\n\n\nThe following sections will cover some additional functionality permitted to\nadmin users in a non-Docker deployment.\n\n\nNode Configuration and Status\n\n\nThis page provides a list of all of the services that are configured to run on\nthe OpenMPF cluster:\n\n\n\n\nEach node shows information about the current status of each service, if it is\nunlaunchable due to an underlying error, and how many services are running for\neach node. If a service is unlaunchable, it will be indicated using a red\nstatus icon (not shown). Note that services are grouped by component type.\nClick the chevron \">\" to expand a service group to view the individual services.\n\n\nAn admin user can start, stop, or restart them on an individual basis. 
If a\nnon-admin user views this page, the \"Action(s)\" column is not displayed. This\npage also enables an admin user to edit the configuration for all nodes in the\nOpenMPF cluster. A non-admin user can only view the existing configuration.\n\n\nAn admin user can add a node by using the \"Add Node\" button and selecting a\nnode in the OpenMPF cluster from the drop-down list. You can also select to add\nall services at this time. A node and all of its configured services can be\nremoved by clicking the trash can to the right of the node's hostname.\n\n\nAn admin user can add services individually by selecting the node edit button\nat the bottom of the node. The number of service instances can be increased or\ndecreased by using the drop-down. Click the \"Submit\" button to save the changes.\n\n\nWhen making changes, please be aware of the following:\n\n\n\n\nIt may take a minute for the configuration to take effect on the server.\n\n\nIf you remove an existing service from a node, any job that service is\n processing will be stopped, and you will need to resubmit that job.\n\n\nIf you create a new node, its configuration will not take effect until the\n OpenMPF software is properly installed and started on the associated host.\n\n\nIf you delete a node, you will need to manually turn off the hardware running\n that node (deleting a node does not shut down the machine).\n\n\n\n\nComponent Registration\n\n\nThis page allows an admin user to add and remove non-default components to and\nfrom the system:\n\n\n\n\nA component package takes the form of a tar.gz file. An admin user can either\ndrag and drop the file onto the \"Upload a new component\" dropzone area or click\nthe dropzone area to open a file browser and select the file that way.\nIn either case, the component will begin to be uploaded to the system. If the\nadmin user dragged and dropped the file onto the dropzone area then the upload\nprogress will be shown in that area. 
Once uploaded, the Workflow Manager will\nautomatically attempt to register the component. Notification messages will\nappear in the upper right side of the screen to indicate success or failure if\nan error occurs. The \"Current Components\" table will display the component\nstatus.\n\n\n\n\nIf for some reason the component package upload succeeded but the component\nregistration failed then the admin user will be able to click the \"Register\"\nbutton again to try another registration attempt. For example, the admin\nuser may do this after reviewing the Workflow Manager logs and resolving any\nissues that prevented the component from successfully registering the first\ntime. One reason may be that a component with the same name already exists on\nthe system. Note that an error will also occur if the top-level directory of\nthe component package, once extracted, already exists in the \n/opt/mpf/plugins\n\ndirectory on the system.\n\n\nOnce registered, an admin user has the option to remove the component. This\nwill unregister it and completely remove any configured services, as well as\nthe uploaded file and its extracted contents, from the system. Also, the\ncomponent algorithm as well as any actions, tasks, and pipelines specified in\nthe component's descriptor file will be removed when the component is removed.",
"title": "Development Environment Guide"
},
{
@@ -1402,7 +1402,7 @@
},
{
"location": "/Development-Environment-Guide/index.html#setup-vm",
- "text": "Download the ISO for the desktop version of Ubuntu 20.04 from\n https://releases.ubuntu.com/20.04 . Create an Ubuntu VM using the downloaded iso. This part is different based on\n what VM software you are using. Use mpf as your username. During the initial install, the VM window was small and didn't stretch to\n fill up the screen, but this may be fixed automatically after the installation\n finishes, or there may be additional steps necessary to install tools or\n configure settings based on your VM software. After completing the installation, you will likely be prompted to update\n software. You should install the updates. Optionally, shutdown the VM and take a snapshot. This will enable you to revert back\n to a clean Ubuntu install in case anything goes wrong. Open a terminal and run sudo apt update Run sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl ansible Run sudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h Run sudo ln --symbolic /usr/bin/cmake /usr/bin/cmake3 Follow instructions to install Docker:\n https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository Optionally, configure Docker to use socket activation. 
The advantage of socket activation is\n that systemd will automatically start the Docker daemon when you use docker commands: sudo systemctl disable docker.service;\nsudo systemctl stop docker.service;\nsudo systemctl enable docker.socket; Follow instructions so that you can run Docker without sudo:\n https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user Install Docker Compose: sudo apt update\nsudo apt install docker-compose-plugin Optionally, stop redis from starting automatically:\n sudo systemctl disable redis Optionally, stop postgresql from starting automatically:\n sudo systemctl disable postgresql Initialize Postgres (use \"password\" when prompted for a password): sudo -i -u postgres createuser -P mpf\nsudo -i -u postgres createdb -O mpf mpf Build and install OpenCV: mkdir /tmp/opencv-contrib;\nwget -O- 'https://github.com/opencv/opencv_contrib/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/opencv-contrib;\nmkdir /tmp/opencv;\ncd /tmp/opencv;\nwget -O- 'https://github.com/opencv/opencv/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip;\ncd opencv-4.9.0;\nmkdir build;\ncd build;\nexport OpenBLAS_HOME=/usr/lib/x86_64-linux-gnu/openblas-pthread; \\\ncmake -DCMAKE_INSTALL_PREFIX:PATH='/opt/opencv-4.9.0' \\\n -DWITH_IPP=false \\\n -DBUILD_EXAMPLES=false \\\n -DBUILD_TESTS=false \\\n -DBUILD_PERF_TESTS=false \\\n -DOPENCV_EXTRA_MODULES_PATH=/tmp/opencv-contrib/opencv_contrib-4.9.0/modules \\\n ..;\nsudo make --jobs \"$(nproc)\" install;\nsudo ln --symbolic '/opt/opencv-4.9.0/include/opencv4/opencv2' /usr/local/include/opencv2;\nsudo sh -c 'echo /opt/opencv-4.9.0/lib > /etc/ld.so.conf.d/mpf.conf'\nsudo ldconfig;\nsudo rm -rf /tmp/opencv-contrib /tmp/opencv; Build and install the ActiveMQ C++ library: mkdir /tmp/activemq-cpp;\ncd /tmp/activemq-cpp;\nwget -O- https://dlcdn.apache.org/activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.gz \\\n | tar --extract --gzip;\ncd 
activemq-cpp-library-3.9.5;\n./configure;\nsudo make --jobs \"$(nproc)\" install;\nsudo rm -rf /tmp/activemq-cpp; Install NotoEmoji font for markup: mkdir /tmp/noto;\ncd /tmp/noto;\nwget https://noto-website-2.storage.googleapis.com/pkgs/NotoEmoji-unhinted.zip;\nunzip NotoEmoji-unhinted.zip;\nsudo mkdir --parents /usr/share/fonts/google-noto-emoji;\nsudo cp NotoEmoji-Regular.ttf /usr/share/fonts/google-noto-emoji/;\nsudo chmod a+r /usr/share/fonts/google-noto-emoji/NotoEmoji-Regular.ttf;\nrm -rf /tmp/noto; Build and install PNG Defry: mkdir /tmp/pngdefry;\ncd /tmp/pngdefry;\nwget -O- 'https://github.com/openmpf/pngdefry/archive/v1.2.tar.gz' \\\n | tar --extract --gzip;\ncd pngdefry-1.2;\nsudo gcc pngdefry.c -o /usr/local/bin/pngdefry;\nrm -rf /tmp/pngdefry; Install Maven: wget -O- 'https://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz' \\\n | sudo tar --extract --gzip --directory /opt;\nsudo ln --symbolic /opt/apache-maven-3.3.3/bin/mvn /usr/local/bin; Build and install libheif: mkdir /tmp/libheif;\ncd /tmp/libheif;\nwget -O- https://github.com/strukturag/libheif/archive/refs/tags/v1.12.0.tar.gz \\\n | tar --extract --gzip;\ncd libheif-1.12.0;\nmkdir build;\ncd build;\ncmake3 -DCMAKE_INSTALL_PREFIX=/usr -DWITH_EXAMPLES=false ..;\nsudo make --jobs \"$(nproc)\" install;\ncd;\nsudo rm -rf /tmp/libheif; Build and install Protocol Buffers: mkdir /tmp/abseil;\nwget -O- 'https://github.com/abseil/abseil-cpp/releases/download/20240722.0/abseil-cpp-20240722.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/abseil;\n\nmkdir /tmp/protobuf;\ncd /tmp/protobuf;\nwget -O- 'https://github.com/protocolbuffers/protobuf/releases/download/v28.3/protobuf-28.3.tar.gz' \\\n | tar --extract --gzip;\n\ncd protobuf-28.3;\nmkdir build;\ncd build;\ncmake \\\n -DCMAKE_CXX_STANDARD=17 \\\n -DCMAKE_POSITION_INDEPENDENT_CODE=ON \\\n -DABSL_ROOT_DIR=/tmp/abseil/abseil-cpp-20240722.0 \\\n -Dprotobuf_BUILD_TESTS=OFF \\\n ..;\ncmake --build . 
--parallel \"$(nproc)\";\nsudo cmake --install . --strip;\ncd;\nsudo rm -rf /tmp/abseil /tmp/protobuf From your home directory run: git clone https://github.com/openmpf/openmpf-projects.git --recursive;\ncd openmpf-projects;\ngit checkout develop;\ngit submodule foreach git checkout develop; Run: pip install openmpf-projects/openmpf/trunk/bin/mpf-scripts Add PATH=\"$HOME/.local/bin:$PATH\" to ~/.bashrc Run mkdir -p openmpf-projects/openmpf/trunk/install/share/logs Run sudo cp openmpf-projects/openmpf/trunk/mpf-install/src/main/scripts/mpf-profile.sh /etc/profile.d/mpf.sh Run sudo sh -c 'echo /home/mpf/mpf-sdk-install/lib >> /etc/ld.so.conf.d/mpf.conf' Run sudo cp openmpf-projects/openmpf/trunk/node-manager/src/scripts/node-manager.service /etc/systemd/system/node-manager.service Run cd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties Run sudo sh -c 'echo \"[mpf-child]\\nlocalhost\" >> /etc/ansible/hosts' Run mkdir -p ~/.m2/repository/; tar -f /home/mpf/openmpf-projects/openmpf-build-tools/mpf-maven-deps.tar.gz --extract --gzip --directory ~/.m2/repository/ Reboot the VM. At this point you may wish to install additional dependencies so that you can\nbuild specific OpenMPF components. Refer to the commands in the Dockerfile \nfor each component you're interested in.",
+ "text": "Download the ISO for the desktop version of Ubuntu 20.04 from\n https://releases.ubuntu.com/20.04 . Create an Ubuntu VM using the downloaded iso. This part is different based on\n what VM software you are using. Use mpf as your username. During the initial install, the VM window was small and didn't stretch to\n fill up the screen, but this may be fixed automatically after the installation\n finishes, or there may be additional steps necessary to install tools or\n configure settings based on your VM software. After completing the installation, you will likely be prompted to update\n software. You should install the updates. Optionally, shutdown the VM and take a snapshot. This will enable you to revert back\n to a clean Ubuntu install in case anything goes wrong. Open a terminal and run sudo apt update Run sudo apt install gnupg2 unzip xz-utils cmake make g++ libgtest-dev mediainfo libssl-dev liblog4cxx-dev libboost-dev file openjdk-17-jdk python3.8-dev python3-pip python3.8-venv libde265-dev libopenblas-dev liblapacke-dev libavcodec-dev libavcodec-extra libavformat-dev libavutil-dev libswscale-dev libavresample-dev libharfbuzz-dev libfreetype-dev ffmpeg git git-lfs redis postgresql-12 curl Run sudo ln --symbolic /usr/include/x86_64-linux-gnu/openblas-pthread/cblas.h /usr/include/cblas.h Run sudo ln --symbolic /usr/bin/cmake /usr/bin/cmake3 Follow instructions to install Docker:\n https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository Optionally, configure Docker to use socket activation. 
The advantage of socket activation is\n that systemd will automatically start the Docker daemon when you use docker commands: sudo systemctl disable docker.service;\nsudo systemctl stop docker.service;\nsudo systemctl enable docker.socket; Follow instructions so that you can run Docker without sudo:\n https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user Install Docker Compose: sudo apt update\nsudo apt install docker-compose-plugin Optionally, stop redis from starting automatically:\n sudo systemctl disable redis Optionally, stop postgresql from starting automatically:\n sudo systemctl disable postgresql Initialize Postgres (use \"password\" when prompted for a password): sudo -i -u postgres createuser -P mpf\nsudo -i -u postgres createdb -O mpf mpf Build and install OpenCV: mkdir /tmp/opencv-contrib;\nwget -O- 'https://github.com/opencv/opencv_contrib/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/opencv-contrib;\nmkdir /tmp/opencv;\ncd /tmp/opencv;\nwget -O- 'https://github.com/opencv/opencv/archive/4.9.0.tar.gz' \\\n | tar --extract --gzip;\ncd opencv-4.9.0;\nmkdir build;\ncd build;\nexport OpenBLAS_HOME=/usr/lib/x86_64-linux-gnu/openblas-pthread; \\\ncmake -DCMAKE_INSTALL_PREFIX:PATH='/opt/opencv-4.9.0' \\\n -DWITH_IPP=false \\\n -DBUILD_EXAMPLES=false \\\n -DBUILD_TESTS=false \\\n -DBUILD_PERF_TESTS=false \\\n -DOPENCV_EXTRA_MODULES_PATH=/tmp/opencv-contrib/opencv_contrib-4.9.0/modules \\\n ..;\nsudo make --jobs \"$(nproc)\" install;\nsudo ln --symbolic '/opt/opencv-4.9.0/include/opencv4/opencv2' /usr/local/include/opencv2;\nsudo sh -c 'echo /opt/opencv-4.9.0/lib > /etc/ld.so.conf.d/mpf.conf'\nsudo ldconfig;\nsudo rm -rf /tmp/opencv-contrib /tmp/opencv; Build and install the ActiveMQ C++ library: mkdir /tmp/activemq-cpp;\ncd /tmp/activemq-cpp;\nwget -O- https://dlcdn.apache.org/activemq/activemq-cpp/3.9.5/activemq-cpp-library-3.9.5-src.tar.gz \\\n | tar --extract --gzip;\ncd 
activemq-cpp-library-3.9.5;\n./configure;\nsudo make --jobs \"$(nproc)\" install;\nsudo rm -rf /tmp/activemq-cpp; Install NotoEmoji font for markup: mkdir /tmp/noto;\ncd /tmp/noto;\nwget https://noto-website-2.storage.googleapis.com/pkgs/NotoEmoji-unhinted.zip;\nunzip NotoEmoji-unhinted.zip;\nsudo mkdir --parents /usr/share/fonts/google-noto-emoji;\nsudo cp NotoEmoji-Regular.ttf /usr/share/fonts/google-noto-emoji/;\nsudo chmod a+r /usr/share/fonts/google-noto-emoji/NotoEmoji-Regular.ttf;\nrm -rf /tmp/noto; Build and install PNG Defry: mkdir /tmp/pngdefry;\ncd /tmp/pngdefry;\nwget -O- 'https://github.com/openmpf/pngdefry/archive/v1.2.tar.gz' \\\n | tar --extract --gzip;\ncd pngdefry-1.2;\nsudo gcc pngdefry.c -o /usr/local/bin/pngdefry;\nrm -rf /tmp/pngdefry; Install Maven: wget -O- 'https://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz' \\\n | sudo tar --extract --gzip --directory /opt;\nsudo ln --symbolic /opt/apache-maven-3.3.3/bin/mvn /usr/local/bin; Build and install libheif: mkdir /tmp/libheif;\ncd /tmp/libheif;\nwget -O- https://github.com/strukturag/libheif/archive/refs/tags/v1.12.0.tar.gz \\\n | tar --extract --gzip;\ncd libheif-1.12.0;\nmkdir build;\ncd build;\ncmake3 -DCMAKE_INSTALL_PREFIX=/usr -DWITH_EXAMPLES=false ..;\nsudo make --jobs \"$(nproc)\" install;\ncd;\nsudo rm -rf /tmp/libheif; Build and install Protocol Buffers: mkdir /tmp/abseil;\nwget -O- 'https://github.com/abseil/abseil-cpp/releases/download/20240722.0/abseil-cpp-20240722.0.tar.gz' \\\n | tar --extract --gzip --directory /tmp/abseil;\n\nmkdir /tmp/protobuf;\ncd /tmp/protobuf;\nwget -O- 'https://github.com/protocolbuffers/protobuf/releases/download/v28.3/protobuf-28.3.tar.gz' \\\n | tar --extract --gzip;\n\ncd protobuf-28.3;\nmkdir build;\ncd build;\ncmake \\\n -DCMAKE_CXX_STANDARD=17 \\\n -DCMAKE_POSITION_INDEPENDENT_CODE=ON \\\n -DABSL_ROOT_DIR=/tmp/abseil/abseil-cpp-20240722.0 \\\n -Dprotobuf_BUILD_TESTS=OFF \\\n ..;\ncmake --build . 
--parallel \"$(nproc)\";\nsudo cmake --install . --strip;\ncd;\nsudo rm -rf /tmp/abseil /tmp/protobuf Install Python3.12: sudo add-apt-repository -y ppa:deadsnakes/ppa\nsudo apt update\nsudo apt install python3.12-dev python3.12-venv From your home directory run: git clone https://github.com/openmpf/openmpf-projects.git --recursive;\ncd openmpf-projects;\ngit checkout develop;\ngit submodule foreach git checkout develop; Run: pip install openmpf-projects/openmpf/trunk/bin/mpf-scripts Add PATH=\"$HOME/.local/bin:$PATH\" to ~/.bashrc Run mkdir -p openmpf-projects/openmpf/trunk/install/share/logs Run sudo cp openmpf-projects/openmpf/trunk/mpf-install/src/main/scripts/mpf-profile.sh /etc/profile.d/mpf.sh Run sudo sh -c 'echo /home/mpf/mpf-sdk-install/lib >> /etc/ld.so.conf.d/mpf.conf' Run sudo cp openmpf-projects/openmpf/trunk/node-manager/src/scripts/node-manager.service /etc/systemd/system/node-manager.service Run cd ~/openmpf-projects/openmpf/trunk/workflow-manager/src/main/resources/properties/; cp mpf-private-example.properties mpf-private.properties Run pip3.8 install ansible Run sudo sh -c 'echo \"[mpf-child]\\nlocalhost\" >> /etc/ansible/hosts' Run mkdir -p ~/.m2/repository/; tar -f /home/mpf/openmpf-projects/openmpf-build-tools/mpf-maven-deps.tar.gz --extract --gzip --directory ~/.m2/repository/ Reboot the VM. At this point you may wish to install additional dependencies so that you can\nbuild specific OpenMPF components. Refer to the commands in the Dockerfile \nfor each component you're interested in.",
"title": "Setup VM"
},
{
diff --git a/docs/site/sitemap.xml b/docs/site/sitemap.xml
index 2a4a819a2077..adcaa9986b91 100644
--- a/docs/site/sitemap.xml
+++ b/docs/site/sitemap.xml
@@ -2,157 +2,157 @@
/index.html
- 2025-02-14
+ 2025-03-17
daily
/Release-Notes/index.html
- 2025-02-14
+ 2025-03-17
daily
/License-And-Distribution/index.html
- 2025-02-14
+ 2025-03-17
daily
/Acknowledgements/index.html
- 2025-02-14
+ 2025-03-17
daily
/Install-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Admin-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/User-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/OpenID-Connect-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Media-Segmentation-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Feed-Forward-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Derivative-Media-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Object-Storage-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Markup-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/TiesDb-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Trigger-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Roll-Up-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Health-Check-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Quality-Selection-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Media-Selectors-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/REST-API/index.html
- 2025-02-14
+ 2025-03-17
daily
/Component-API-Overview/index.html
- 2025-02-14
+ 2025-03-17
daily
/Component-Descriptor-Reference/index.html
- 2025-02-14
+ 2025-03-17
daily
/CPP-Batch-Component-API/index.html
- 2025-02-14
+ 2025-03-17
daily
/Python-Batch-Component-API/index.html
- 2025-02-14
+ 2025-03-17
daily
/Java-Batch-Component-API/index.html
- 2025-02-14
+ 2025-03-17
daily
/GPU-Support-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Contributor-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Development-Environment-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Node-Guide/index.html
- 2025-02-14
+ 2025-03-17
daily
/Workflow-Manager-Architecture/index.html
- 2025-02-14
+ 2025-03-17
daily
/CPP-Streaming-Component-API/index.html
- 2025-02-14
+ 2025-03-17
daily
\ No newline at end of file
From 24a0a35cc6e2b32f069c631f0b96ee1953fe9dec Mon Sep 17 00:00:00 2001
From: Brian Rosenberg
Date: Tue, 19 Aug 2025 08:58:19 -0400
Subject: [PATCH 2/3] Address PR issues
---
docs/docs/Python-Batch-Component-API.md | 118 ++++--------------
.../Python-Batch-Component-API/index.html | 114 ++++-------------
docs/site/index.html | 2 +-
docs/site/search/search_index.json | 6 +-
docs/site/sitemap.xml | 62 ++++-----
5 files changed, 79 insertions(+), 223 deletions(-)
diff --git a/docs/docs/Python-Batch-Component-API.md b/docs/docs/Python-Batch-Component-API.md
index a0b7435b9df3..76b88be7c977 100644
--- a/docs/docs/Python-Batch-Component-API.md
+++ b/docs/docs/Python-Batch-Component-API.md
@@ -280,8 +280,8 @@ MyComponent
├── MyComponent-0.1-py3-none-any.whl
├── mpf_component_api-0.1-py3-none-any.whl
├── mpf_component_util-0.1-py3-none-any.whl
- ├── numpy-1.18.4-cp38-cp38-manylinux1_x86_64.whl
- └── opencv_python-4.2.0.34-cp38-cp38-manylinux1_x86_64.whl
+ ├── numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+ └── opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl
```
To create the plugin packages you can run the build script as follows:
@@ -1105,7 +1105,7 @@ generating an exception, choose the type that best describes your error.
# Python Component Build Environment
-All Python components must work with CPython 3.8.10. Also, Python components
+All Python components must work with CPython 3.12. Also, Python components
must work with the Linux version that is used by the OpenMPF Component
Executable. At this writing, OpenMPF runs on
Ubuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any
@@ -1121,98 +1121,26 @@ Python 3 implementation because it does not use any implementation-specific
features. `none` means that it does not use the Python ABI. `any` means it will
work on any platform.
-The following combinations of compatibility tags are supported:
-
-* `cp38-cp38-manylinux2014_x86_64`
-* `cp38-cp38-manylinux2010_x86_64`
-* `cp38-cp38-manylinux1_x86_64`
-* `cp38-cp38-linux_x86_64`
-* `cp38-abi3-manylinux2014_x86_64`
-* `cp38-abi3-manylinux2010_x86_64`
-* `cp38-abi3-manylinux1_x86_64`
-* `cp38-abi3-linux_x86_64`
-* `cp38-none-manylinux2014_x86_64`
-* `cp38-none-manylinux2010_x86_64`
-* `cp38-none-manylinux1_x86_64`
-* `cp38-none-linux_x86_64`
-* `cp37-abi3-manylinux2014_x86_64`
-* `cp37-abi3-manylinux2010_x86_64`
-* `cp37-abi3-manylinux1_x86_64`
-* `cp37-abi3-linux_x86_64`
-* `cp36-abi3-manylinux2014_x86_64`
-* `cp36-abi3-manylinux2010_x86_64`
-* `cp36-abi3-manylinux1_x86_64`
-* `cp36-abi3-linux_x86_64`
-* `cp35-abi3-manylinux2014_x86_64`
-* `cp35-abi3-manylinux2010_x86_64`
-* `cp35-abi3-manylinux1_x86_64`
-* `cp35-abi3-linux_x86_64`
-* `cp34-abi3-manylinux2014_x86_64`
-* `cp34-abi3-manylinux2010_x86_64`
-* `cp34-abi3-manylinux1_x86_64`
-* `cp34-abi3-linux_x86_64`
-* `cp33-abi3-manylinux2014_x86_64`
-* `cp33-abi3-manylinux2010_x86_64`
-* `cp33-abi3-manylinux1_x86_64`
-* `cp33-abi3-linux_x86_64`
-* `cp32-abi3-manylinux2014_x86_64`
-* `cp32-abi3-manylinux2010_x86_64`
-* `cp32-abi3-manylinux1_x86_64`
-* `cp32-abi3-linux_x86_64`
-* `py38-none-manylinux2014_x86_64`
-* `py38-none-manylinux2010_x86_64`
-* `py38-none-manylinux1_x86_64`
-* `py38-none-linux_x86_64`
-* `py3-none-manylinux2014_x86_64`
-* `py3-none-manylinux2010_x86_64`
-* `py3-none-manylinux1_x86_64`
-* `py3-none-linux_x86_64`
-* `py37-none-manylinux2014_x86_64`
-* `py37-none-manylinux2010_x86_64`
-* `py37-none-manylinux1_x86_64`
-* `py37-none-linux_x86_64`
-* `py36-none-manylinux2014_x86_64`
-* `py36-none-manylinux2010_x86_64`
-* `py36-none-manylinux1_x86_64`
-* `py36-none-linux_x86_64`
-* `py35-none-manylinux2014_x86_64`
-* `py35-none-manylinux2010_x86_64`
-* `py35-none-manylinux1_x86_64`
-* `py35-none-linux_x86_64`
-* `py34-none-manylinux2014_x86_64`
-* `py34-none-manylinux2010_x86_64`
-* `py34-none-manylinux1_x86_64`
-* `py34-none-linux_x86_64`
-* `py33-none-manylinux2014_x86_64`
-* `py33-none-manylinux2010_x86_64`
-* `py33-none-manylinux1_x86_64`
-* `py33-none-linux_x86_64`
-* `py32-none-manylinux2014_x86_64`
-* `py32-none-manylinux2010_x86_64`
-* `py32-none-manylinux1_x86_64`
-* `py32-none-linux_x86_64`
-* `py31-none-manylinux2014_x86_64`
-* `py31-none-manylinux2010_x86_64`
-* `py31-none-manylinux1_x86_64`
-* `py31-none-linux_x86_64`
-* `py30-none-manylinux2014_x86_64`
-* `py30-none-manylinux2010_x86_64`
-* `py30-none-manylinux1_x86_64`
-* `py30-none-linux_x86_64`
-* `cp38-none-any`
-* `py38-none-any`
-* `py3-none-any`
-* `py37-none-any`
-* `py36-none-any`
-* `py35-none-any`
-* `py34-none-any`
-* `py33-none-any`
-* `py32-none-any`
-* `py31-none-any`
-* `py30-none-any`
-
-The list above was generated with the following command:
-`python3 -c 'import pip._internal.pep425tags as tags; print("\n".join(str(t) for t in tags.get_supported()))'`
+The acceptable Python version tags are:
+
+- `cp312` (or lower)
+- `py312` (or lower)
+
+The **ONLY** acceptable ABI tags are:
+
+- `cp312`
+- `abi3`
+- `none`
+
+The acceptable platform tags are:
+
+- `any`
+- `linux_x86_64`
+- `manylinux2010_x86_64`
+- `manylinux2014_x86_64`
+- `manylinux1_x86_64`
+- `manylinux_2_5_x86_64` through `manylinux_2_31_x86_64`
+
Components should be supplied as a tar file, which includes not only the component library, but any other libraries or
files needed for execution. This includes all other non-standard libraries used by the component
diff --git a/docs/site/Python-Batch-Component-API/index.html b/docs/site/Python-Batch-Component-API/index.html
index d676416de08d..11f3b5ba4651 100644
--- a/docs/site/Python-Batch-Component-API/index.html
+++ b/docs/site/Python-Batch-Component-API/index.html
@@ -521,8 +521,8 @@ To create the plugin packages you can run the build script as follows:
~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent
@@ -1389,7 +1389,7 @@ How to Report Errors
the MPF_ prefix. You can replace the MISSING_PROPERTY part in the above code with any other error type. When
generating an exception, choose the type that best describes your error.
Python Component Build Environment
-All Python components must work with CPython 3.8.10. Also, Python components
+All Python components must work with CPython 3.12. Also, Python components
must work with the Linux version that is used by the OpenMPF Component
Executable. At this writing, OpenMPF runs on
Ubuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any
@@ -1404,98 +1404,26 @@
Python Component Build Environment
Python 3 implementation because it does not use any implementation-specific
features. none means that it does not use the Python ABI. any means it will
work on any platform.
-The following combinations of compatibility tags are supported:
+The acceptable Python version tags are:
-cp38-cp38-manylinux2014_x86_64
-cp38-cp38-manylinux2010_x86_64
-cp38-cp38-manylinux1_x86_64
-cp38-cp38-linux_x86_64
-cp38-abi3-manylinux2014_x86_64
-cp38-abi3-manylinux2010_x86_64
-cp38-abi3-manylinux1_x86_64
-cp38-abi3-linux_x86_64
-cp38-none-manylinux2014_x86_64
-cp38-none-manylinux2010_x86_64
-cp38-none-manylinux1_x86_64
-cp38-none-linux_x86_64
-cp37-abi3-manylinux2014_x86_64
-cp37-abi3-manylinux2010_x86_64
-cp37-abi3-manylinux1_x86_64
-cp37-abi3-linux_x86_64
-cp36-abi3-manylinux2014_x86_64
-cp36-abi3-manylinux2010_x86_64
-cp36-abi3-manylinux1_x86_64
-cp36-abi3-linux_x86_64
-cp35-abi3-manylinux2014_x86_64
-cp35-abi3-manylinux2010_x86_64
-cp35-abi3-manylinux1_x86_64
-cp35-abi3-linux_x86_64
-cp34-abi3-manylinux2014_x86_64
-cp34-abi3-manylinux2010_x86_64
-cp34-abi3-manylinux1_x86_64
-cp34-abi3-linux_x86_64
-cp33-abi3-manylinux2014_x86_64
-cp33-abi3-manylinux2010_x86_64
-cp33-abi3-manylinux1_x86_64
-cp33-abi3-linux_x86_64
-cp32-abi3-manylinux2014_x86_64
-cp32-abi3-manylinux2010_x86_64
-cp32-abi3-manylinux1_x86_64
-cp32-abi3-linux_x86_64
-py38-none-manylinux2014_x86_64
-py38-none-manylinux2010_x86_64
-py38-none-manylinux1_x86_64
-py38-none-linux_x86_64
-py3-none-manylinux2014_x86_64
-py3-none-manylinux2010_x86_64
-py3-none-manylinux1_x86_64
-py3-none-linux_x86_64
-py37-none-manylinux2014_x86_64
-py37-none-manylinux2010_x86_64
-py37-none-manylinux1_x86_64
-py37-none-linux_x86_64
-py36-none-manylinux2014_x86_64
-py36-none-manylinux2010_x86_64
-py36-none-manylinux1_x86_64
-py36-none-linux_x86_64
-py35-none-manylinux2014_x86_64
-py35-none-manylinux2010_x86_64
-py35-none-manylinux1_x86_64
-py35-none-linux_x86_64
-py34-none-manylinux2014_x86_64
-py34-none-manylinux2010_x86_64
-py34-none-manylinux1_x86_64
-py34-none-linux_x86_64
-py33-none-manylinux2014_x86_64
-py33-none-manylinux2010_x86_64
-py33-none-manylinux1_x86_64
-py33-none-linux_x86_64
-py32-none-manylinux2014_x86_64
-py32-none-manylinux2010_x86_64
-py32-none-manylinux1_x86_64
-py32-none-linux_x86_64
-py31-none-manylinux2014_x86_64
-py31-none-manylinux2010_x86_64
-py31-none-manylinux1_x86_64
-py31-none-linux_x86_64
-py30-none-manylinux2014_x86_64
-py30-none-manylinux2010_x86_64
-py30-none-manylinux1_x86_64
-py30-none-linux_x86_64
-cp38-none-any
-py38-none-any
-py3-none-any
-py37-none-any
-py36-none-any
-py35-none-any
-py34-none-any
-py33-none-any
-py32-none-any
-py31-none-any
-py30-none-any
+cp312 (or lower)
+py312 (or lower)
+
+The ONLY acceptable ABI tags are:
+
+cp312
+abi3
+none
+
+The acceptable platform tags are:
+
+any
+linux_x86_64
+manylinux2010_x86_64
+manylinux2014_x86_64
+manylinux1_x86_64
+manylinux_2_5_x86_64 through manylinux_2_31_x86_64
-The list above was generated with the following command:
-python3 -c 'import pip._internal.pep425tags as tags; print("\n".join(str(t) for t in tags.get_supported()))'
Components should be supplied as a tar file, which includes not only the component library, but any other libraries or
files needed for execution. This includes all other non-standard libraries used by the component
(aside from the standard Python libraries), and any configuration or data files.
diff --git a/docs/site/index.html b/docs/site/index.html
index 0960e47e5ff6..fa97015940fe 100644
--- a/docs/site/index.html
+++ b/docs/site/index.html
@@ -404,5 +404,5 @@ Overview
diff --git a/docs/site/search/search_index.json b/docs/site/search/search_index.json
index ab680462b4ef..3384dca29300 100644
--- a/docs/site/search/search_index.json
+++ b/docs/site/search/search_index.json
@@ -942,7 +942,7 @@
},
{
"location": "/Python-Batch-Component-API/index.html",
- "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\nAPI Overview\n\n\nIn OpenMPF, a \ncomponent\n is a plugin that receives jobs (containing media), processes that media, and returns results.\n\n\nThe OpenMPF Batch Component API currently supports the development of \ndetection components\n, which are used detect\nobjects in image, video, audio, or other (generic) files that reside on disk.\n\n\nUsing this API, detection components can be built to provide:\n\n\n\n\nDetection (Localizing an object)\n\n\nTracking (Localizing an object across multiple frames)\n\n\nClassification (Detecting the type of object and optionally localizing that object)\n\n\nTranscription (Detecting speech and transcribing it into text)\n\n\n\n\nHow Components Integrate into OpenMPF\n\n\nComponents are integrated into OpenMPF through the use of OpenMPF's \nComponent Executable\n.\nDevelopers create component libraries that encapsulate the component detection logic.\nEach instance of the Component Executable loads one of these libraries and uses it to service job requests\nsent by the OpenMPF Workflow Manager (WFM).\n\n\nThe Component Executable:\n\n\n\n\nReceives and parses job requests from the WFM\n\n\nInvokes methods on the component library to obtain detection results\n\n\nPopulates and sends the respective responses to the WFM\n\n\n\n\nThe basic pseudocode for the Component Executable is as follows:\n\n\ncomponent_cls = locate_component_class()\ncomponent = component_cls()\n\nwhile True:\n job = receive_job()\n\n if is_image_job(job) and hasattr(component, 'get_detections_from_image'):\n detections = component.get_detections_from_image(job)\n send_job_response(detections)\n\n elif is_video_job(job) and hasattr(component, 'get_detections_from_video'):\n detections = 
component.get_detections_from_video(job)\n send_job_response(detections)\n\n elif is_audio_job(job) and hasattr(component, 'get_detections_from_audio'):\n detections = component.get_detections_from_audio(job)\n send_job_response(detections)\n\n elif is_generic_job(job) and hasattr(component, 'get_detections_from_generic'):\n detections = component.get_detections_from_generic(job)\n send_job_response(detections)\n\n\n\nEach instance of a Component Executable runs as a separate process.\n\n\nThe Component Executable receives and parses requests from the WFM, invokes methods on the Component Logic to get\ndetection objects, and subsequently populates responses with the component output and sends them to the WFM.\n\n\nA component developer implements a detection component by creating a class that defines one or more of the\nget_detections_from_* methods. See the \nAPI Specification\n for more information.\n\n\nThe figures below present high-level component diagrams of the Python Batch Component API.\nThis figure shows the basic structure:\n\n\n\n\nThe Node Manager is only used in a non-Docker deployment. In a Docker deployment the Component Executor is started by the Docker container itself.\n\n\nThe Component Executor determines that it is running a Python component so it creates an instance of the\n\nPythonComponentHandle\n\nclass. The \nPythonComponentHandle\n class creates an instance of the component class and calls one of the\n\nget_detections_from_*\n methods on the component instance. The example\nabove is an image component, so \nPythonComponentHandle\n calls \nExampleImageFaceDetection.get_detections_from_image\n\non the component instance. The component instance creates an instance of\n\nmpf_component_util.ImageReader\n to access the image. 
Components that support video\nwould implement \nget_detections_from_video\n and use\n\nmpf_component_util.VideoCapture\n instead.\n\n\nThis figure show the structure when the mixin classes are used:\n\n\n\n\nThe figure above shows a video component, \nExampleVideoFaceDetection\n, that extends the\n\nmpf_component_util.VideoCaptureMixin\n class. \nPythonComponentHandle\n will\ncall \nget_detections_from_video\n on an instance of \nExampleVideoFaceDetection\n. \nExampleVideoFaceDetection\n does not\nimplement \nget_detections_from_video\n, so the implementation inherited from \nmpf_component_util.VideoCaptureMixin\n\ngets called. \nmpf_component_util.VideoCaptureMixin.get_detections_from_video\n creates an instance of\n\nmpf_component_util.VideoCapture\n and calls\n\nExampleVideoFaceDetection.get_detections_from_video_capture\n, passing in the \nmpf_component_util.VideoCapture\n it\njust created. \nExampleVideoFaceDetection.get_detections_from_video_capture\n is where the component reads the video\nusing the passed-in \nmpf_component_util.VideoCapture\n and attempts to find detections. Components that support images\nwould extend \nmpf_component_util.ImageReaderMixin\n, implement\n\nget_detections_from_image_reader\n, and access the image using the passed-in\n\nmpf_component_util.ImageReader\n.\n\n\nDuring component registration a \nvirtualenv\n is created for each component.\nThe virtualenv has access to the built-in Python libraries, but does not have access to any third party packages\nthat might be installed on the system. When creating the virtualenv for a setuptools-based component the only packages\nthat get installed are the component itself and any dependencies specified in the setup.cfg\nfile (including their transitive dependencies). When creating the virtualenv for a basic Python component the only\npackage that gets installed is \nmpf_component_api\n. \nmpf_component_api\n is the package containing the job classes\n(e.g. 
\nmpf_component_api.ImageJob\n,\n\nmpf_component_api.VideoJob\n) and detection result classes\n(e.g. \nmpf_component_api.ImageLocation\n,\n\nmpf_component_api.VideoTrack\n).\n\n\nHow to Create a Python Component\n\n\nThere are two types of Python components that are supported, setuptools-based components and basic Python components.\nBasic Python components are quicker to set up, but have no built-in support for dependency management.\nAll dependencies must be handled by the developer. Setuptools-based components are recommended since they use\nsetuptools and pip for dependency management.\n\n\nEither way, the end goal is to create a Docker image. This document describes the steps for developing a component\noutside of Docker. Many developers prefer to do that first and then focus on building and running their component\nwithin Docker after they are confident it works in a local environment. Alternatively, some developers feel confident\ndeveloping their component entirely within Docker. When you're ready for the Docker steps, refer to the\n\nREADME\n.\n\n\nGet openmpf-python-component-sdk\n\n\nIn order to create a Python component you will need to clone the\n\nopenmpf-python-component-sdk repository\n if you don't\nalready have it. While not technically required, it is recommended to also clone the\n\nopenmpf-build-tools repository\n.\nThe rest of the steps assume you cloned openmpf-python-component-sdk to\n\n~/openmpf-projects/openmpf-python-component-sdk\n. 
The rest of the steps also assume that if you cloned the\nopenmpf-build-tools repository, you cloned it to \n~/openmpf-projects/openmpf-build-tools\n.\n\n\nSetup Python Component Libraries\n\n\nThe component packaging steps require that wheel files for \nmpf_component_api\n, \nmpf_component_util\n, and\ntheir dependencies are available in the \n~/mpf-sdk-install/python/wheelhouse\n directory.\n\n\nIf you have openmpf-build-tools, then you can run:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk\n\n\n\nTo setup the libraries manually you can run:\n\n\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/api\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/component_util\n\n\n\nHow to Create a Setuptools-based Python Component\n\n\nIn this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json\n\n\n\n2. 
Create pyproject.toml file in project's top-level directory:\n\n\npyproject.toml\n should contain the following content:\n\n\n[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n\n\n3. Create setup.cfg file in project's top-level directory:\n\n\nExample of a minimal setup.cfg file:\n\n\n[metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/*\n\n\n\nThe \nname\n parameter defines the distribution name. Typically the distribution name matches the component name.\n\n\nAny dependencies that component requires should be listed in the \ninstall_requires\n field.\n\n\nThe Component Executor looks in the \nentry_points\n element and uses the \nmpf.exported_component\n field to determine\nthe component class. The right hand side of \ncomponent =\n should be the dotted module name, followed by a \n:\n,\nfollowed by the name of the class. The general pattern is\n\n'mpf.exported_component': 'component = .:'\n. In the above example,\n\nMyComponent\n is the class name. The module is listed as \nmy_component.my_component\n because the \nmy_component\n\npackage contains the \nmy_component.py\n file and the \nmy_component.py\n file contains the \nMyComponent\n class.\n\n\nThe \n[options.package_data]\n section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed.\n\n\n4. Create descriptor.json file in MyComponent/plugin-files/descriptor:\n\n\nThe \nbatchLibrary\n field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \n\"batchLibrary\" : \"MyComponent\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n5. 
Implement your component class:\n\n\nBelow is an example of the structure of a simple component. This component extends\n\nmpf_component_util.VideoCaptureMixin\n to simplify the use of\n\nmpf_component_util.VideoCapture\n. You would replace the call to\n\nrun_detection_algorithm_on_frame\n with your component-specific logic.\n\n\nimport logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track\n\n\n\n6. Optional: Add prebuilt wheel files if not available on PyPi:\n\n\nIf your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's \nplugin-files/wheelhouse\n directory.\nThe prebuilt library names must be listed in your \nsetup.cfg\n file's \ninstall_requires\n field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's \nplugin-files/wheelhouse\n directory.\n\n\n7. 
Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nMyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-1.18.4-cp38-cp38-manylinux1_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.2.0.34-cp38-cp38-manylinux1_x86_64.whl\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following commands:\n\n\nmkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n8. Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nHow to Create a Basic Python Component\n\n\nIn this example we create a basic Python component that supports video. An example of a basic Python component can be\nfound\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/descriptor\ntouch MyComponent/descriptor/descriptor.json\ntouch MyComponent/my_component.py\n\n\n\n2. 
Create descriptor.json file in MyComponent/descriptor:\n\n\nThe \nbatchLibrary\n field should be the full path to the Python file containing your component class.\nIn this example the field should be: \n\"batchLibrary\" : \"${MPF_HOME}/plugins/MyComponent/my_component.py\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n3. Implement your component class:\n\n\nBelow is an example of the structure of a simple component that does not use\n\nmpf_component_util.VideoCaptureMixin\n. You would replace the call to\n\nrun_detection_algorithm\n with your component-specific logic.\n\n\nimport logging\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n return run_detection_algorithm(video_job)\n\nEXPORT_MPF_COMPONENT = MyComponent\n\n\n\nThe Component Executor looks for a module-level variable named \nEXPORT_MPF_COMPONENT\n to specify which class\nis the component.\n\n\n4. Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following command:\n\n\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n5. 
Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nAPI Specification\n\n\nAn OpenMPF Python component is a class that defines one or more of the get_detections_from_* methods.\n\n\ncomponent.get_detections_from_* methods\n\n\nAll get_detections_from_* methods are invoked through an instance of the component class. The only parameter passed\nin is an appropriate job object (e.g. \nmpf_component_api.ImageJob\n, \nmpf_component_api.VideoJob\n). Since the methods\nare invoked through an instance, instance methods and class methods end up with two arguments, the first is either the\ninstance or the class, respectively. All get_detections_from_* methods can be implemented either as an instance method,\na static method, or a class method.\nFor example:\n\n\ninstance method:\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nstatic method:\n\n\nclass MyComponent:\n @staticmethod\n def get_detections_from_image(image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nclass method:\n\n\nclass MyComponent:\n @classmethod\n def get_detections_from_image(cls, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nAll get_detections_from_* methods must return an iterable of the appropriate detection type\n(e.g. \nmpf_component_api.ImageLocation\n, \nmpf_component_api.VideoTrack\n). 
The return value is normally a list or generator,\nbut any iterable can be used.\n\n\nImage API\n\n\ncomponent.get_detections_from_image(image_job)\n\n\nUsed to detect objects in an image file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nget_detections_from_image\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nimage_job\n\n\nmpf_component_api.ImageJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.ImageLocation\n\n\n\n\nmpf_component_api.ImageJob\n\n\nClass containing data used for detection of objects in an image file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.jpg\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of the image in pixels\n\n \nFRAME_HEIGHT\n : the height of the image in pixels\n\n \n\n May include the following key-value pairs:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \nHORIZONTAL_FLIP\n : true if the image is mirrored across the Y-axis, otherwise false\n\n \nEXIF_ORIENTATION\n : the standard EXIF orientation tag; a value between 1 and 8\n\n \n\n \n\n \n\n \n\n \nfeed_forward_location\n\n \nNone\n or \nmpf_component_api.ImageLocation\n\n \nAn \nmpf_component_api.ImageLocation\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. 
Docker container).\n\n\nmpf_component_api.ImageLocation\n\n\nClass used to store the location of detected objects in a image file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, x_left_upper, y_left_upper, width, height, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nx_left_upper\n\n\nint\n\n\nUpper left X coordinate of the detected object.\n\n\n\n\n\n\ny_left_upper\n\n\nint\n\n\nUpper left Y coordinate of the detected object.\n\n\n\n\n\n\nwidth\n\n\nint\n\n\nThe width of the detected object.\n\n\n\n\n\n\nheight\n\n\nint\n\n\nThe height of the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nSee here for information about rotation and horizontal flipping.\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\nmpf_component_api.ImageLocation(0, 0, 100, 100, 1.0, {'CLASSIFICATION': 'backpack'})\n\n\n\nmpf_component_util.ImageReader\n\n\nmpf_component_util.ImageReader\n is a utility class for accessing images. It is the image equivalent to\n\nmpf_component_util.VideoCapture\n. Like \nmpf_component_util.VideoCapture\n,\nit may modify the read-in frame data based on job_properties. 
From the point of view of someone using\n\nmpf_component_util.ImageReader\n, these modifications are mostly transparent. \nmpf_component_util.ImageReader\n makes\nit look like you are reading the original image file as though it has already been rotated, flipped, cropped, etc.\n\n\nOne issue with this approach is that the detection bounding boxes will be relative to the\nmodified frame data, not the original. To make the detections relative to the original image\nthe \nmpf_component_util.ImageReader.reverse_transform(image_location)\n method must be called on each\n\nmpf_component_api.ImageLocation\n. Since the use of \nmpf_component_util.ImageReader\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.ImageReader\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_image(image_job):\n image_reader = mpf_component_util.ImageReader(image_job)\n image = image_reader.get_image()\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_image_locations = run_component_specific_algorithm(image)\n for result in result_image_locations:\n image_reader.reverse_transform(result)\n yield result\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.ImageReaderMixin\n for a more concise way to use\n\nmpf_component_util.ImageReader\n below.\n\n\nmpf_component_util.ImageReaderMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.ImageReader\n.\n\nmpf_component_util.ImageReaderMixin\n takes care of initializing a \nmpf_component_util.ImageReader\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.ImageReaderMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.ImageReaderMixin\n.\n\n\nThe component must implement 
\nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must read the image using the \nmpf_component_util.ImageReader\n\n that is passed in to \nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must NOT implement \nget_detections_from_image(image_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.ImageReader.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.ImageReaderMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.ImageReaderMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_image_reader(image_job, image_reader):\n image = image_reader.get_image()\n\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n return run_component_specific_algorithm(image)\n\n\n\nmpf_component_util.ImageReaderMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\n\n\nVideo API\n\n\ncomponent.get_detections_from_video(video_job)\n\n\nUsed to detect objects in a video file. Prior to being sent to the component, videos are split into logical \"segments\"\nof video data and each segment (containing a range of frames) is assigned to a different job. Components are not\nguaranteed to receive requests in any order. 
For example, the first request processed by a component might cover frames 300-399 of Video A, while the next request may cover frames 900-999 of Video B.
Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of video in milliseconds\n\n \nFPS\n : frames per second (averaged for variable frame rate video)\n\n \nFRAME_COUNT\n : the number of frames in the video\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of a frame in pixels\n\n \nFRAME_HEIGHT\n : the height of a frame in pixels\n\n \nHAS_CONSTANT_FRAME_RATE\n : set to true if the video has a constant frame rate; otherwise, omitted or set to false if the video has variable frame rate or the type of frame rate cannot be determined\n\n \n\n May include the following key-value pair:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.VideoTrack\n\n \nAn \nmpf_component_api.VideoTrack\n from the previous pipeline stage. Provided when feed forward is enabled. 
See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\n\n\nIMPORTANT:\n \nFRAME_INTERVAL\n is a common job property that many components support.\nFor frame intervals greater than 1, the component must look for detections starting with the first\nframe, and then skip frames as specified by the frame interval, until or before it reaches the stop frame.\nFor example, given a start frame of 0, a stop frame of 99, and a frame interval of 2, then the detection component\nmust look for objects in frames numbered 0, 2, 4, 6, ..., 98.\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.VideoTrack\n\n\nClass used to store the location of detected objects in a video file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_frame, stop_frame, confidence=-1.0, frame_locations=None, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_frame\n\n\nint\n\n\nThe first frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nstop_frame\n\n\nint\n\n\nThe last frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. 
If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\nframe_locations\n\n\ndict[int, mpf_component_api.ImageLocation]\n\n\nA dict of individual detections. The key for each entry is the frame number where the detection was generated, and the value is a \nmpf_component_api.ImageLocation\n calculated as if that frame was a still image. Note that a key-value pair is \nnot\n required for every frame between the track start frame and track stop frame.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.VideoTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\ntrack = mpf_component_api.VideoTrack(0, 1)\ntrack.frame_locations[0] = mpf_component_api.ImageLocation(0, 0, 100, 100, 0.75, {'CLASSIFICATION': 'backpack'})\ntrack.frame_locations[1] = mpf_component_api.ImageLocation(10, 10, 110, 110, 0.95, {'CLASSIFICATION': 'backpack'})\ntrack.confidence = max(il.confidence for il in track.frame_locations.itervalues())\n\n\n\nmpf_component_util.VideoCapture\n\n\nmpf_component_util.VideoCapture\n is a utility class for reading videos. \nmpf_component_util.VideoCapture\n works very\nsimilarly to \ncv2.VideoCapture\n, except that it might modify the video frames based on job properties. 
From the point\nof view of someone using \nmpf_component_util.VideoCapture\n, these modifications are mostly transparent.\n\nmpf_component_util.VideoCapture\n makes it look like you are reading the original video file as though it has already\nbeen rotated, flipped, cropped, etc. Also, if frame skipping is enabled, such as by setting the value of the\n\nFRAME_INTERVAL\n job property, it makes it look like you are reading the video as though it never contained the\nskipped frames.\n\n\nOne issue with this approach is that the detection frame numbers and bounding box will be relative to the\nmodified video, not the original. To make the detections relative to the original video\nthe \nmpf_component_util.VideoCapture.reverse_transform(video_track)\n method must be called on each\n\nmpf_component_api.VideoTrack\n. Since the use of \nmpf_component_util.VideoCapture\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.VideoCapture\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n video_capture = mpf_component_util.VideoCapture(video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n video_capture.reverse_transform(track)\n yield track\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.VideoCaptureMixin\n for a more concise way to use\n\nmpf_component_util.VideoCapture\n below.\n\n\nmpf_component_util.VideoCaptureMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.VideoCapture\n.\n\nmpf_component_util.VideoCaptureMixin\n 
takes care of initializing a \nmpf_component_util.VideoCapture\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.VideoCaptureMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.VideoCaptureMixin\n.\n\n\nThe component must implement \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must read the video using the \nmpf_component_util.VideoCapture\n\n that is passed in to \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must NOT implement \nget_detections_from_video(video_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.VideoCapture.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.VideoCaptureMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_video_capture(video_job, video_capture):\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield track\n\n\n\nmpf_component_util.VideoCaptureMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. 
If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\nFor example:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin, mpf_component_util.ImageReaderMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n ...\n\n @staticmethod\n def get_detections_from_image_reader(image_job, image_reader):\n ...\n\n\n\nAudio API\n\n\ncomponent.get_detections_from_audio(audio_job)\n\n\nUsed to detect objects in an audio file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_audio(self, audio_job):\n return [mpf_component_api.AudioTrack(...), ...]\n\n\n\nget_detections_from_audio\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\naudio_job\n\n\nmpf_component_api.AudioJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.AudioTrack\n\n\n\n\nmpf_component_api.AudioJob\n\n\nClass containing data used for detection of objects in an audio file.\nCurrently, audio files are not logically segmented, so a job will contain the entirety of the audio file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. 
For example, \"/opt/mpf/share/remote-media/test-file.mp3\".\n\n \n\n \n\n \nstart_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the beginning of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \nstop_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the end of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of audio file in milliseconds\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.AudioTrack\n\n \nAn \nmpf_component_api.AudioTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). 
It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.AudioTrack\n\n\nClass used to store the location of detected objects in an audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_time, stop_time, confidence, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event started.\n\n\n\n\n\n\nstop_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event stopped.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.AudioTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\nGeneric API\n\n\ncomponent.get_detections_from_generic(generic_job)\n\n\nUsed to detect objects in files that are not video, image, or audio files. 
Such files are of the UNKNOWN type and\nhandled generically.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_generic(self, generic_job):\n return [mpf_component_api.GenericTrack(...), ...]\n\n\n\nget_detections_from_generic\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\ngeneric_job\n\n\nmpf_component_api.GenericJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.GenericTrack\n\n\n\n\nmpf_component_api.GenericJob\n\n\nClass containing data used for detection of objects in a file that isn't a video, image, or audio file. The file is not\nlogically segmented, so a job will contain the entirety of the file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.txt\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pair:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.GenericTrack\n\n \nAn \nmpf_component_api.GenericTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.GenericTrack\n\n\nClass used to store the location of detected objects in a file that is not a video, image, or audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. 
For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nHow to Report Errors\n\n\nThe following is an example of how to throw an exception:\n\n\nimport mpf_component_api as mpf\n\n...\nraise mpf.DetectionError.MISSING_PROPERTY.exception(\n 'The REALLY_IMPORTANT property must be provided as a job property.')\n\n\n\nThe Python Batch Component API supports all of the same error types\nlisted \nhere\n for the C++ Batch Component API. Be sure to omit\nthe \nMPF_\n prefix. You can replace the \nMISSING_PROPERTY\n part in the above code with any other error type. When\ngenerating an exception, choose the type that best describes your error.\n\n\nPython Component Build Environment\n\n\nAll Python components must work with CPython 3.8.10. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof \n----.whl\n.\n\n--\n are called\n\ncompatibility tags\n. For example,\n\nmpf_component_api\n is pure Python, so the name of its wheel file is\n\nmpf_component_api-0.1-py3-none-any.whl\n. \npy3\n means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. \nnone\n means that it does not use the Python ABI. 
\nany\n means it will\nwork on any platform.\n\n\nThe following combinations of compatibility tags are supported:\n\n\n\n\ncp38-cp38-manylinux2014_x86_64\n\n\ncp38-cp38-manylinux2010_x86_64\n\n\ncp38-cp38-manylinux1_x86_64\n\n\ncp38-cp38-linux_x86_64\n\n\ncp38-abi3-manylinux2014_x86_64\n\n\ncp38-abi3-manylinux2010_x86_64\n\n\ncp38-abi3-manylinux1_x86_64\n\n\ncp38-abi3-linux_x86_64\n\n\ncp38-none-manylinux2014_x86_64\n\n\ncp38-none-manylinux2010_x86_64\n\n\ncp38-none-manylinux1_x86_64\n\n\ncp38-none-linux_x86_64\n\n\ncp37-abi3-manylinux2014_x86_64\n\n\ncp37-abi3-manylinux2010_x86_64\n\n\ncp37-abi3-manylinux1_x86_64\n\n\ncp37-abi3-linux_x86_64\n\n\ncp36-abi3-manylinux2014_x86_64\n\n\ncp36-abi3-manylinux2010_x86_64\n\n\ncp36-abi3-manylinux1_x86_64\n\n\ncp36-abi3-linux_x86_64\n\n\ncp35-abi3-manylinux2014_x86_64\n\n\ncp35-abi3-manylinux2010_x86_64\n\n\ncp35-abi3-manylinux1_x86_64\n\n\ncp35-abi3-linux_x86_64\n\n\ncp34-abi3-manylinux2014_x86_64\n\n\ncp34-abi3-manylinux2010_x86_64\n\n\ncp34-abi3-manylinux1_x86_64\n\n\ncp34-abi3-linux_x86_64\n\n\ncp33-abi3-manylinux2014_x86_64\n\n\ncp33-abi3-manylinux2010_x86_64\n\n\ncp33-abi3-manylinux1_x86_64\n\n\ncp33-abi3-linux_x86_64\n\n\ncp32-abi3-manylinux2014_x86_64\n\n\ncp32-abi3-manylinux2010_x86_64\n\n\ncp32-abi3-manylinux1_x86_64\n\n\ncp32-abi3-linux_x86_64\n\n\npy38-none-manylinux2014_x86_64\n\n\npy38-none-manylinux2010_x86_64\n\n\npy38-none-manylinux1_x86_64\n\n\npy38-none-linux_x86_64\n\n\npy3-none-manylinux2014_x86_64\n\n\npy3-none-manylinux2010_x86_64\n\n\npy3-none-manylinux1_x86_64\n\n\npy3-none-linux_x86_64\n\n\npy37-none-manylinux2014_x86_64\n\n\npy37-none-manylinux2010_x86_64\n\n\npy37-none-manylinux1_x86_64\n\n\npy37-none-linux_x86_64\n\n\npy36-none-manylinux2014_x86_64\n\n\npy36-none-manylinux2010_x86_64\n\n\npy36-none-manylinux1_x86_64\n\n\npy36-none-linux_x86_64\n\n\npy35-none-manylinux2014_x86_64\n\n\npy35-none-manylinux2010_x86_64\n\n\npy35-none-manylinux1_x86_64\n\n\npy35-none-linux_x86_64\n\n\npy34-none-manylinux
2014_x86_64\n\n\npy34-none-manylinux2010_x86_64\n\n\npy34-none-manylinux1_x86_64\n\n\npy34-none-linux_x86_64\n\n\npy33-none-manylinux2014_x86_64\n\n\npy33-none-manylinux2010_x86_64\n\n\npy33-none-manylinux1_x86_64\n\n\npy33-none-linux_x86_64\n\n\npy32-none-manylinux2014_x86_64\n\n\npy32-none-manylinux2010_x86_64\n\n\npy32-none-manylinux1_x86_64\n\n\npy32-none-linux_x86_64\n\n\npy31-none-manylinux2014_x86_64\n\n\npy31-none-manylinux2010_x86_64\n\n\npy31-none-manylinux1_x86_64\n\n\npy31-none-linux_x86_64\n\n\npy30-none-manylinux2014_x86_64\n\n\npy30-none-manylinux2010_x86_64\n\n\npy30-none-manylinux1_x86_64\n\n\npy30-none-linux_x86_64\n\n\ncp38-none-any\n\n\npy38-none-any\n\n\npy3-none-any\n\n\npy37-none-any\n\n\npy36-none-any\n\n\npy35-none-any\n\n\npy34-none-any\n\n\npy33-none-any\n\n\npy32-none-any\n\n\npy31-none-any\n\n\npy30-none-any\n\n\n\n\nThe list above was generated with the following command:\n\npython3 -c 'import pip._internal.pep425tags as tags; print(\"\\n\".join(str(t) for t in tags.get_supported()))'\n\n\nComponents should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.\n\n\nComponent Development Best Practices\n\n\nSingle-threaded Operation\n\n\nImplementations are encouraged to operate in single-threaded mode. OpenMPF will parallelize components through\nmultiple instantiations of the component, each running as a separate service.\n\n\nStateless Behavior\n\n\nOpenMPF components should be stateless in operation and give identical output for a provided input\n(i.e. 
when processing the same job).\n\n\nLogging\n\n\nIt recommended that components use Python's built-in\n\nlogging\n module.\n The component should\n\nimport logging\n and call \nlogging.getLogger('')\n to get a logger instance.\nThe component should not configure logging itself. The Component Executor will configure the\n\nlogging\n module for the component. The logger will write log messages to standard error and\n\n${MPF_LOG_PATH}/${THIS_MPF_NODE}/log/.log\n. Note that multiple instances of the\nsame component can log to the same file. Also, logging content can span multiple lines.\n\n\nThe following log levels are supported: \nFATAL, ERROR, WARN, INFO, DEBUG\n.\nThe \nLOG_LEVEL\n environment variable can be set to one of the log levels to change the logging\nverbosity. When \nLOG_LEVEL\n is absent, \nINFO\n is used.\n\n\nThe format of the log messages is:\n\n\nDATE TIME LEVEL [SOURCE_FILE:LINE_NUMBER] - MESSAGE\n\n\n\nFor example:\n\n\n2018-05-03 14:41:11,703 INFO [test_component.py:44] - Logged message",
+ "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\nAPI Overview\n\n\nIn OpenMPF, a \ncomponent\n is a plugin that receives jobs (containing media), processes that media, and returns results.\n\n\nThe OpenMPF Batch Component API currently supports the development of \ndetection components\n, which are used detect\nobjects in image, video, audio, or other (generic) files that reside on disk.\n\n\nUsing this API, detection components can be built to provide:\n\n\n\n\nDetection (Localizing an object)\n\n\nTracking (Localizing an object across multiple frames)\n\n\nClassification (Detecting the type of object and optionally localizing that object)\n\n\nTranscription (Detecting speech and transcribing it into text)\n\n\n\n\nHow Components Integrate into OpenMPF\n\n\nComponents are integrated into OpenMPF through the use of OpenMPF's \nComponent Executable\n.\nDevelopers create component libraries that encapsulate the component detection logic.\nEach instance of the Component Executable loads one of these libraries and uses it to service job requests\nsent by the OpenMPF Workflow Manager (WFM).\n\n\nThe Component Executable:\n\n\n\n\nReceives and parses job requests from the WFM\n\n\nInvokes methods on the component library to obtain detection results\n\n\nPopulates and sends the respective responses to the WFM\n\n\n\n\nThe basic pseudocode for the Component Executable is as follows:\n\n\ncomponent_cls = locate_component_class()\ncomponent = component_cls()\n\nwhile True:\n job = receive_job()\n\n if is_image_job(job) and hasattr(component, 'get_detections_from_image'):\n detections = component.get_detections_from_image(job)\n send_job_response(detections)\n\n elif is_video_job(job) and hasattr(component, 'get_detections_from_video'):\n detections = 
component.get_detections_from_video(job)\n send_job_response(detections)\n\n elif is_audio_job(job) and hasattr(component, 'get_detections_from_audio'):\n detections = component.get_detections_from_audio(job)\n send_job_response(detections)\n\n elif is_generic_job(job) and hasattr(component, 'get_detections_from_generic'):\n detections = component.get_detections_from_generic(job)\n send_job_response(detections)\n\n\n\nEach instance of a Component Executable runs as a separate process.\n\n\nThe Component Executable receives and parses requests from the WFM, invokes methods on the Component Logic to get\ndetection objects, and subsequently populates responses with the component output and sends them to the WFM.\n\n\nA component developer implements a detection component by creating a class that defines one or more of the\nget_detections_from_* methods. See the \nAPI Specification\n for more information.\n\n\nThe figures below present high-level component diagrams of the Python Batch Component API.\nThis figure shows the basic structure:\n\n\n\n\nThe Node Manager is only used in a non-Docker deployment. In a Docker deployment the Component Executor is started by the Docker container itself.\n\n\nThe Component Executor determines that it is running a Python component so it creates an instance of the\n\nPythonComponentHandle\n\nclass. The \nPythonComponentHandle\n class creates an instance of the component class and calls one of the\n\nget_detections_from_*\n methods on the component instance. The example\nabove is an image component, so \nPythonComponentHandle\n calls \nExampleImageFaceDetection.get_detections_from_image\n\non the component instance. The component instance creates an instance of\n\nmpf_component_util.ImageReader\n to access the image. 
Components that support video\nwould implement \nget_detections_from_video\n and use\n\nmpf_component_util.VideoCapture\n instead.\n\n\nThis figure show the structure when the mixin classes are used:\n\n\n\n\nThe figure above shows a video component, \nExampleVideoFaceDetection\n, that extends the\n\nmpf_component_util.VideoCaptureMixin\n class. \nPythonComponentHandle\n will\ncall \nget_detections_from_video\n on an instance of \nExampleVideoFaceDetection\n. \nExampleVideoFaceDetection\n does not\nimplement \nget_detections_from_video\n, so the implementation inherited from \nmpf_component_util.VideoCaptureMixin\n\ngets called. \nmpf_component_util.VideoCaptureMixin.get_detections_from_video\n creates an instance of\n\nmpf_component_util.VideoCapture\n and calls\n\nExampleVideoFaceDetection.get_detections_from_video_capture\n, passing in the \nmpf_component_util.VideoCapture\n it\njust created. \nExampleVideoFaceDetection.get_detections_from_video_capture\n is where the component reads the video\nusing the passed-in \nmpf_component_util.VideoCapture\n and attempts to find detections. Components that support images\nwould extend \nmpf_component_util.ImageReaderMixin\n, implement\n\nget_detections_from_image_reader\n, and access the image using the passed-in\n\nmpf_component_util.ImageReader\n.\n\n\nDuring component registration a \nvirtualenv\n is created for each component.\nThe virtualenv has access to the built-in Python libraries, but does not have access to any third party packages\nthat might be installed on the system. When creating the virtualenv for a setuptools-based component the only packages\nthat get installed are the component itself and any dependencies specified in the setup.cfg\nfile (including their transitive dependencies). When creating the virtualenv for a basic Python component the only\npackage that gets installed is \nmpf_component_api\n. \nmpf_component_api\n is the package containing the job classes\n(e.g. 
\nmpf_component_api.ImageJob\n,\n\nmpf_component_api.VideoJob\n) and detection result classes\n(e.g. \nmpf_component_api.ImageLocation\n,\n\nmpf_component_api.VideoTrack\n).\n\n\nHow to Create a Python Component\n\n\nThere are two types of Python components that are supported, setuptools-based components and basic Python components.\nBasic Python components are quicker to set up, but have no built-in support for dependency management.\nAll dependencies must be handled by the developer. Setuptools-based components are recommended since they use\nsetuptools and pip for dependency management.\n\n\nEither way, the end goal is to create a Docker image. This document describes the steps for developing a component\noutside of Docker. Many developers prefer to do that first and then focus on building and running their component\nwithin Docker after they are confident it works in a local environment. Alternatively, some developers feel confident\ndeveloping their component entirely within Docker. When you're ready for the Docker steps, refer to the\n\nREADME\n.\n\n\nGet openmpf-python-component-sdk\n\n\nIn order to create a Python component you will need to clone the\n\nopenmpf-python-component-sdk repository\n if you don't\nalready have it. While not technically required, it is recommended to also clone the\n\nopenmpf-build-tools repository\n.\nThe rest of the steps assume you cloned openmpf-python-component-sdk to\n\n~/openmpf-projects/openmpf-python-component-sdk\n. 
The rest of the steps also assume that if you cloned the\nopenmpf-build-tools repository, you cloned it to \n~/openmpf-projects/openmpf-build-tools\n.\n\n\nSetup Python Component Libraries\n\n\nThe component packaging steps require that wheel files for \nmpf_component_api\n, \nmpf_component_util\n, and\ntheir dependencies are available in the \n~/mpf-sdk-install/python/wheelhouse\n directory.\n\n\nIf you have openmpf-build-tools, then you can run:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk\n\n\n\nTo setup the libraries manually you can run:\n\n\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/api\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/component_util\n\n\n\nHow to Create a Setuptools-based Python Component\n\n\nIn this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json\n\n\n\n2. 
Create pyproject.toml file in project's top-level directory:\n\n\npyproject.toml\n should contain the following content:\n\n\n[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n\n\n3. Create setup.cfg file in project's top-level directory:\n\n\nExample of a minimal setup.cfg file:\n\n\n[metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/*\n\n\n\nThe \nname\n parameter defines the distribution name. Typically the distribution name matches the component name.\n\n\nAny dependencies that component requires should be listed in the \ninstall_requires\n field.\n\n\nThe Component Executor looks in the \nentry_points\n element and uses the \nmpf.exported_component\n field to determine\nthe component class. The right hand side of \ncomponent =\n should be the dotted module name, followed by a \n:\n,\nfollowed by the name of the class. The general pattern is\n\n'mpf.exported_component': 'component = .:'\n. In the above example,\n\nMyComponent\n is the class name. The module is listed as \nmy_component.my_component\n because the \nmy_component\n\npackage contains the \nmy_component.py\n file and the \nmy_component.py\n file contains the \nMyComponent\n class.\n\n\nThe \n[options.package_data]\n section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed.\n\n\n4. Create descriptor.json file in MyComponent/plugin-files/descriptor:\n\n\nThe \nbatchLibrary\n field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \n\"batchLibrary\" : \"MyComponent\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n5. 
Implement your component class:\n\n\nBelow is an example of the structure of a simple component. This component extends\n\nmpf_component_util.VideoCaptureMixin\n to simplify the use of\n\nmpf_component_util.VideoCapture\n. You would replace the call to\n\nrun_detection_algorithm_on_frame\n with your component-specific logic.\n\n\nimport logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track\n\n\n\n6. Optional: Add prebuilt wheel files if not available on PyPi:\n\n\nIf your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's \nplugin-files/wheelhouse\n directory.\nThe prebuilt library names must be listed in your \nsetup.cfg\n file's \ninstall_requires\n field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's \nplugin-files/wheelhouse\n directory.\n\n\n7. 
Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nMyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following commands:\n\n\nmkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n8. Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nHow to Create a Basic Python Component\n\n\nIn this example we create a basic Python component that supports video. An example of a basic Python component can be\nfound\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/descriptor\ntouch MyComponent/descriptor/descriptor.json\ntouch MyComponent/my_component.py\n\n\n\n2. 
Create descriptor.json file in MyComponent/descriptor:\n\n\nThe \nbatchLibrary\n field should be the full path to the Python file containing your component class.\nIn this example the field should be: \n\"batchLibrary\" : \"${MPF_HOME}/plugins/MyComponent/my_component.py\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n3. Implement your component class:\n\n\nBelow is an example of the structure of a simple component that does not use\n\nmpf_component_util.VideoCaptureMixin\n. You would replace the call to\n\nrun_detection_algorithm\n with your component-specific logic.\n\n\nimport logging\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n return run_detection_algorithm(video_job)\n\nEXPORT_MPF_COMPONENT = MyComponent\n\n\n\nThe Component Executor looks for a module-level variable named \nEXPORT_MPF_COMPONENT\n to specify which class\nis the component.\n\n\n4. Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following command:\n\n\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n5. 
Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nAPI Specification\n\n\nAn OpenMPF Python component is a class that defines one or more of the get_detections_from_* methods.\n\n\ncomponent.get_detections_from_* methods\n\n\nAll get_detections_from_* methods are invoked through an instance of the component class. The only parameter passed\nin is an appropriate job object (e.g. \nmpf_component_api.ImageJob\n, \nmpf_component_api.VideoJob\n). Since the methods\nare invoked through an instance, instance methods and class methods end up with two arguments, the first is either the\ninstance or the class, respectively. All get_detections_from_* methods can be implemented either as an instance method,\na static method, or a class method.\nFor example:\n\n\ninstance method:\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nstatic method:\n\n\nclass MyComponent:\n @staticmethod\n def get_detections_from_image(image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nclass method:\n\n\nclass MyComponent:\n @classmethod\n def get_detections_from_image(cls, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nAll get_detections_from_* methods must return an iterable of the appropriate detection type\n(e.g. \nmpf_component_api.ImageLocation\n, \nmpf_component_api.VideoTrack\n). 
The return value is normally a list or generator,\nbut any iterable can be used.\n\n\nImage API\n\n\ncomponent.get_detections_from_image(image_job)\n\n\nUsed to detect objects in an image file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nget_detections_from_image\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nimage_job\n\n\nmpf_component_api.ImageJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.ImageLocation\n\n\n\n\nmpf_component_api.ImageJob\n\n\nClass containing data used for detection of objects in an image file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.jpg\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of the image in pixels\n\n \nFRAME_HEIGHT\n : the height of the image in pixels\n\n \n\n May include the following key-value pairs:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \nHORIZONTAL_FLIP\n : true if the image is mirrored across the Y-axis, otherwise false\n\n \nEXIF_ORIENTATION\n : the standard EXIF orientation tag; a value between 1 and 8\n\n \n\n \n\n \n\n \n\n \nfeed_forward_location\n\n \nNone\n or \nmpf_component_api.ImageLocation\n\n \nAn \nmpf_component_api.ImageLocation\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. 
Docker container).\n\n\nmpf_component_api.ImageLocation\n\n\nClass used to store the location of detected objects in an image file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, x_left_upper, y_left_upper, width, height, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nx_left_upper\n\n\nint\n\n\nUpper left X coordinate of the detected object.\n\n\n\n\n\n\ny_left_upper\n\n\nint\n\n\nUpper left Y coordinate of the detected object.\n\n\n\n\n\n\nwidth\n\n\nint\n\n\nThe width of the detected object.\n\n\n\n\n\n\nheight\n\n\nint\n\n\nThe height of the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nSee here for information about rotation and horizontal flipping.\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\nmpf_component_api.ImageLocation(0, 0, 100, 100, 1.0, {'CLASSIFICATION': 'backpack'})\n\n\n\nmpf_component_util.ImageReader\n\n\nmpf_component_util.ImageReader\n is a utility class for accessing images. It is the image equivalent to\n\nmpf_component_util.VideoCapture\n. Like \nmpf_component_util.VideoCapture\n,\nit may modify the read-in frame data based on job_properties. 
From the point of view of someone using\n\nmpf_component_util.ImageReader\n, these modifications are mostly transparent. \nmpf_component_util.ImageReader\n makes\nit look like you are reading the original image file as though it has already been rotated, flipped, cropped, etc.\n\n\nOne issue with this approach is that the detection bounding boxes will be relative to the\nmodified frame data, not the original. To make the detections relative to the original image\nthe \nmpf_component_util.ImageReader.reverse_transform(image_location)\n method must be called on each\n\nmpf_component_api.ImageLocation\n. Since the use of \nmpf_component_util.ImageReader\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.ImageReader\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_image(image_job):\n image_reader = mpf_component_util.ImageReader(image_job)\n image = image_reader.get_image()\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_image_locations = run_component_specific_algorithm(image)\n for result in result_image_locations:\n image_reader.reverse_transform(result)\n yield result\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.ImageReaderMixin\n for a more concise way to use\n\nmpf_component_util.ImageReader\n below.\n\n\nmpf_component_util.ImageReaderMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.ImageReader\n.\n\nmpf_component_util.ImageReaderMixin\n takes care of initializing a \nmpf_component_util.ImageReader\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.ImageReaderMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.ImageReaderMixin\n.\n\n\nThe component must implement 
\nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must read the image using the \nmpf_component_util.ImageReader\n\n that is passed in to \nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must NOT implement \nget_detections_from_image(image_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.ImageReader.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.ImageReaderMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.ImageReaderMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_image_reader(image_job, image_reader):\n image = image_reader.get_image()\n\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n return run_component_specific_algorithm(image)\n\n\n\nmpf_component_util.ImageReaderMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\n\n\nVideo API\n\n\ncomponent.get_detections_from_video(video_job)\n\n\nUsed to detect objects in a video file. Prior to being sent to the component, videos are split into logical \"segments\"\nof video data and each segment (containing a range of frames) is assigned to a different job. Components are not\nguaranteed to receive requests in any order. 
For example, the first request processed by a component might receive a\nrequest for frames 300-399 of a Video A, while the next request may cover frames 900-999 of a Video B.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_video(self, video_job):\n return [mpf_component_api.VideoTrack(...), ...]\n\n\n\nget_detections_from_video\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nvideo_job\n\n\nmpf_component_api.VideoJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.VideoTrack\n\n\n\n\nmpf_component_api.VideoJob\n\n\nClass containing data used for detection of objects in a video file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.avi\".\n\n \n\n \n\n \nstart_frame\n\n \nint\n\n \nThe first frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \nstop_frame\n\n \nint\n\n \nThe last frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. 
Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of video in milliseconds\n\n \nFPS\n : frames per second (averaged for variable frame rate video)\n\n \nFRAME_COUNT\n : the number of frames in the video\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of a frame in pixels\n\n \nFRAME_HEIGHT\n : the height of a frame in pixels\n\n \nHAS_CONSTANT_FRAME_RATE\n : set to true if the video has a constant frame rate; otherwise, omitted or set to false if the video has variable frame rate or the type of frame rate cannot be determined\n\n \n\n May include the following key-value pair:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.VideoTrack\n\n \nAn \nmpf_component_api.VideoTrack\n from the previous pipeline stage. Provided when feed forward is enabled. 
See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\n\n\nIMPORTANT:\n \nFRAME_INTERVAL\n is a common job property that many components support.\nFor frame intervals greater than 1, the component must look for detections starting with the first\nframe, and then skip frames as specified by the frame interval, until or before it reaches the stop frame.\nFor example, given a start frame of 0, a stop frame of 99, and a frame interval of 2, then the detection component\nmust look for objects in frames numbered 0, 2, 4, 6, ..., 98.\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.VideoTrack\n\n\nClass used to store the location of detected objects in a video file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_frame, stop_frame, confidence=-1.0, frame_locations=None, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_frame\n\n\nint\n\n\nThe first frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nstop_frame\n\n\nint\n\n\nThe last frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. 
If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\nframe_locations\n\n\ndict[int, mpf_component_api.ImageLocation]\n\n\nA dict of individual detections. The key for each entry is the frame number where the detection was generated, and the value is a \nmpf_component_api.ImageLocation\n calculated as if that frame was a still image. Note that a key-value pair is \nnot\n required for every frame between the track start frame and track stop frame.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.VideoTrack.detection_properties\n do not show up in the JSON output object, nor\nare they used by the WFM in any way.\n\n\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\ntrack = mpf_component_api.VideoTrack(0, 1)\ntrack.frame_locations[0] = mpf_component_api.ImageLocation(0, 0, 100, 100, 0.75, {'CLASSIFICATION': 'backpack'})\ntrack.frame_locations[1] = mpf_component_api.ImageLocation(10, 10, 110, 110, 0.95, {'CLASSIFICATION': 'backpack'})\ntrack.confidence = max(il.confidence for il in track.frame_locations.values())\n\n\n\nmpf_component_util.VideoCapture\n\n\nmpf_component_util.VideoCapture\n is a utility class for reading videos. \nmpf_component_util.VideoCapture\n works very\nsimilarly to \ncv2.VideoCapture\n, except that it might modify the video frames based on job properties. 
From the point\nof view of someone using \nmpf_component_util.VideoCapture\n, these modifications are mostly transparent.\n\nmpf_component_util.VideoCapture\n makes it look like you are reading the original video file as though it has already\nbeen rotated, flipped, cropped, etc. Also, if frame skipping is enabled, such as by setting the value of the\n\nFRAME_INTERVAL\n job property, it makes it look like you are reading the video as though it never contained the\nskipped frames.\n\n\nOne issue with this approach is that the detection frame numbers and bounding box will be relative to the\nmodified video, not the original. To make the detections relative to the original video\nthe \nmpf_component_util.VideoCapture.reverse_transform(video_track)\n method must be called on each\n\nmpf_component_api.VideoTrack\n. Since the use of \nmpf_component_util.VideoCapture\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.VideoCapture\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n video_capture = mpf_component_util.VideoCapture(video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n video_capture.reverse_transform(track)\n yield track\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.VideoCaptureMixin\n for a more concise way to use\n\nmpf_component_util.VideoCapture\n below.\n\n\nmpf_component_util.VideoCaptureMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.VideoCapture\n.\n\nmpf_component_util.VideoCaptureMixin\n 
takes care of initializing a \nmpf_component_util.VideoCapture\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.VideoCaptureMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.VideoCaptureMixin\n.\n\n\nThe component must implement \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must read the video using the \nmpf_component_util.VideoCapture\n\n that is passed in to \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must NOT implement \nget_detections_from_video(video_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.VideoCapture.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.VideoCaptureMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_video_capture(video_job, video_capture):\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield track\n\n\n\nmpf_component_util.VideoCaptureMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. 
If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\nFor example:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin, mpf_component_util.ImageReaderMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n ...\n\n @staticmethod\n def get_detections_from_image_reader(image_job, image_reader):\n ...\n\n\n\nAudio API\n\n\ncomponent.get_detections_from_audio(audio_job)\n\n\nUsed to detect objects in an audio file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_audio(self, audio_job):\n return [mpf_component_api.AudioTrack(...), ...]\n\n\n\nget_detections_from_audio\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\naudio_job\n\n\nmpf_component_api.AudioJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.AudioTrack\n\n\n\n\nmpf_component_api.AudioJob\n\n\nClass containing data used for detection of objects in an audio file.\nCurrently, audio files are not logically segmented, so a job will contain the entirety of the audio file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. 
For example, \"/opt/mpf/share/remote-media/test-file.mp3\".\n\n \n\n \n\n \nstart_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the beginning of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \nstop_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the end of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of audio file in milliseconds\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.AudioTrack\n\n \nAn \nmpf_component_api.AudioTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). 
It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.AudioTrack\n\n\nClass used to store the location of detected objects in an audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_time, stop_time, confidence, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event started.\n\n\n\n\n\n\nstop_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event stopped.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.AudioTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\nGeneric API\n\n\ncomponent.get_detections_from_generic(generic_job)\n\n\nUsed to detect objects in files that are not video, image, or audio files. 
Such files are of the UNKNOWN type and\nhandled generically.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_generic(self, generic_job):\n return [mpf_component_api.GenericTrack(...), ...]\n\n\n\nget_detections_from_generic\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\ngeneric_job\n\n\nmpf_component_api.GenericJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.GenericTrack\n\n\n\n\nmpf_component_api.GenericJob\n\n\nClass containing data used for detection of objects in a file that isn't a video, image, or audio file. The file is not\nlogically segmented, so a job will contain the entirety of the file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.txt\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pair:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.GenericTrack\n\n \nAn \nmpf_component_api.GenericTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.GenericTrack\n\n\nClass used to store the location of detected objects in a file that is not a video, image, or audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. 
For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nHow to Report Errors\n\n\nThe following is an example of how to throw an exception:\n\n\nimport mpf_component_api as mpf\n\n...\nraise mpf.DetectionError.MISSING_PROPERTY.exception(\n 'The REALLY_IMPORTANT property must be provided as a job property.')\n\n\n\nThe Python Batch Component API supports all of the same error types\nlisted \nhere\n for the C++ Batch Component API. Be sure to omit\nthe \nMPF_\n prefix. You can replace the \nMISSING_PROPERTY\n part in the above code with any other error type. When\ngenerating an exception, choose the type that best describes your error.\n\n\nPython Component Build Environment\n\n\nAll Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof \n----.whl\n.\n\n--\n are called\n\ncompatibility tags\n. For example,\n\nmpf_component_api\n is pure Python, so the name of its wheel file is\n\nmpf_component_api-0.1-py3-none-any.whl\n. \npy3\n means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. \nnone\n means that it does not use the Python ABI. 
\nany\n means it will\nwork on any platform.\n\n\nThe acceptable Python version tags are:\n\n\n\n\ncp312\n (or lower)\n\n\npy312\n (or lower)\n\n\n\n\nThe \nONLY\n acceptable ABI tags are:\n\n\n\n\ncp312\n\n\nabi3\n\n\nnone\n\n\n\n\nThe acceptable platform tags are:\n\n\n\n\nany\n\n\nlinux_x86_64\n\n\nmanylinux2010_x86_64\n\n\nmanylinux2014_x86_64\n\n\nmanylinux1_x86_64\n\n\nmanylinux_2_5_x86_64\n through \nmanylinux_2_31_x86_64\n\n\n\n\nComponents should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.\n\n\nComponent Development Best Practices\n\n\nSingle-threaded Operation\n\n\nImplementations are encouraged to operate in single-threaded mode. OpenMPF will parallelize components through\nmultiple instantiations of the component, each running as a separate service.\n\n\nStateless Behavior\n\n\nOpenMPF components should be stateless in operation and give identical output for a provided input\n(i.e. when processing the same job).\n\n\nLogging\n\n\nIt recommended that components use Python's built-in\n\nlogging\n module.\n The component should\n\nimport logging\n and call \nlogging.getLogger('')\n to get a logger instance.\nThe component should not configure logging itself. The Component Executor will configure the\n\nlogging\n module for the component. The logger will write log messages to standard error and\n\n${MPF_LOG_PATH}/${THIS_MPF_NODE}/log/.log\n. Note that multiple instances of the\nsame component can log to the same file. Also, logging content can span multiple lines.\n\n\nThe following log levels are supported: \nFATAL, ERROR, WARN, INFO, DEBUG\n.\nThe \nLOG_LEVEL\n environment variable can be set to one of the log levels to change the logging\nverbosity. 
When \nLOG_LEVEL\n is absent, \nINFO\n is used.\n\n\nThe format of the log messages is:\n\n\nDATE TIME LEVEL [SOURCE_FILE:LINE_NUMBER] - MESSAGE\n\n\n\nFor example:\n\n\n2018-05-03 14:41:11,703 INFO [test_component.py:44] - Logged message",
"title": "Python Batch Component API"
},
{
@@ -972,7 +972,7 @@
},
{
"location": "/Python-Batch-Component-API/index.html#how-to-create-a-setuptools-based-python-component",
- "text": "In this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found here . This is the recommended project structure: ComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl 1. Create directory structure: mkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json 2. Create pyproject.toml file in project's top-level directory: pyproject.toml should contain the following content: [build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\" 3. Create setup.cfg file in project's top-level directory: Example of a minimal setup.cfg file: [metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/* The name parameter defines the distribution name. Typically the distribution name matches the component name. Any dependencies that component requires should be listed in the install_requires field. The Component Executor looks in the entry_points element and uses the mpf.exported_component field to determine\nthe component class. 
The right hand side of component = should be the dotted module name, followed by a : ,\nfollowed by the name of the class. The general pattern is 'mpf.exported_component': 'component = .:' . In the above example, MyComponent is the class name. The module is listed as my_component.my_component because the my_component \npackage contains the my_component.py file and the my_component.py file contains the MyComponent class. The [options.package_data] section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed. 4. Create descriptor.json file in MyComponent/plugin-files/descriptor: The batchLibrary field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \"batchLibrary\" : \"MyComponent\" .\nSee the Component Descriptor Reference for details about\nthe descriptor format. 5. Implement your component class: Below is an example of the structure of a simple component. This component extends mpf_component_util.VideoCaptureMixin to simplify the use of mpf_component_util.VideoCapture . You would replace the call to run_detection_algorithm_on_frame with your component-specific logic. import logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track 6. 
Optional: Add prebuilt wheel files if not available on PyPi: If your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's plugin-files/wheelhouse directory.\nThe prebuilt library names must be listed in your setup.cfg file's install_requires field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's plugin-files/wheelhouse directory. 7. Optional: Create the plugin package for non-Docker deployments: The directory structure of the .tar.gz file will be: MyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-1.18.4-cp38-cp38-manylinux1_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.2.0.34-cp38-cp38-manylinux1_x86_64.whl To create the plugin packages you can run the build script as follows: ~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent The plugin package can also be built manually using the following commands: mkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent 8. Create the component Docker image: See the README .",
+ "text": "In this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found here . This is the recommended project structure: ComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl 1. Create directory structure: mkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json 2. Create pyproject.toml file in project's top-level directory: pyproject.toml should contain the following content: [build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\" 3. Create setup.cfg file in project's top-level directory: Example of a minimal setup.cfg file: [metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/* The name parameter defines the distribution name. Typically the distribution name matches the component name. Any dependencies that component requires should be listed in the install_requires field. The Component Executor looks in the entry_points element and uses the mpf.exported_component field to determine\nthe component class. 
The right hand side of component = should be the dotted module name, followed by a : ,\nfollowed by the name of the class. The general pattern is 'mpf.exported_component': 'component = .:' . In the above example, MyComponent is the class name. The module is listed as my_component.my_component because the my_component \npackage contains the my_component.py file and the my_component.py file contains the MyComponent class. The [options.package_data] section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed. 4. Create descriptor.json file in MyComponent/plugin-files/descriptor: The batchLibrary field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \"batchLibrary\" : \"MyComponent\" .\nSee the Component Descriptor Reference for details about\nthe descriptor format. 5. Implement your component class: Below is an example of the structure of a simple component. This component extends mpf_component_util.VideoCaptureMixin to simplify the use of mpf_component_util.VideoCapture . You would replace the call to run_detection_algorithm_on_frame with your component-specific logic. import logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track 6. 
Optional: Add prebuilt wheel files if not available on PyPi: If your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's plugin-files/wheelhouse directory.\nThe prebuilt library names must be listed in your setup.cfg file's install_requires field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's plugin-files/wheelhouse directory. 7. Optional: Create the plugin package for non-Docker deployments: The directory structure of the .tar.gz file will be: MyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl To create the plugin packages you can run the build script as follows: ~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent The plugin package can also be built manually using the following commands: mkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent 8. Create the component Docker image: See the README .",
"title": "How to Create a Setuptools-based Python Component"
},
{
@@ -1097,7 +1097,7 @@
},
{
"location": "/Python-Batch-Component-API/index.html#python-component-build-environment",
- "text": "All Python components must work with CPython 3.8.10. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof ----.whl . -- are called compatibility tags . For example, mpf_component_api is pure Python, so the name of its wheel file is mpf_component_api-0.1-py3-none-any.whl . py3 means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. none means that it does not use the Python ABI. any means it will\nwork on any platform. The following combinations of compatibility tags are supported: cp38-cp38-manylinux2014_x86_64 cp38-cp38-manylinux2010_x86_64 cp38-cp38-manylinux1_x86_64 cp38-cp38-linux_x86_64 cp38-abi3-manylinux2014_x86_64 cp38-abi3-manylinux2010_x86_64 cp38-abi3-manylinux1_x86_64 cp38-abi3-linux_x86_64 cp38-none-manylinux2014_x86_64 cp38-none-manylinux2010_x86_64 cp38-none-manylinux1_x86_64 cp38-none-linux_x86_64 cp37-abi3-manylinux2014_x86_64 cp37-abi3-manylinux2010_x86_64 cp37-abi3-manylinux1_x86_64 cp37-abi3-linux_x86_64 cp36-abi3-manylinux2014_x86_64 cp36-abi3-manylinux2010_x86_64 cp36-abi3-manylinux1_x86_64 cp36-abi3-linux_x86_64 cp35-abi3-manylinux2014_x86_64 cp35-abi3-manylinux2010_x86_64 cp35-abi3-manylinux1_x86_64 cp35-abi3-linux_x86_64 cp34-abi3-manylinux2014_x86_64 cp34-abi3-manylinux2010_x86_64 cp34-abi3-manylinux1_x86_64 cp34-abi3-linux_x86_64 cp33-abi3-manylinux2014_x86_64 cp33-abi3-manylinux2010_x86_64 cp33-abi3-manylinux1_x86_64 cp33-abi3-linux_x86_64 cp32-abi3-manylinux2014_x86_64 cp32-abi3-manylinux2010_x86_64 cp32-abi3-manylinux1_x86_64 cp32-abi3-linux_x86_64 
py38-none-manylinux2014_x86_64 py38-none-manylinux2010_x86_64 py38-none-manylinux1_x86_64 py38-none-linux_x86_64 py3-none-manylinux2014_x86_64 py3-none-manylinux2010_x86_64 py3-none-manylinux1_x86_64 py3-none-linux_x86_64 py37-none-manylinux2014_x86_64 py37-none-manylinux2010_x86_64 py37-none-manylinux1_x86_64 py37-none-linux_x86_64 py36-none-manylinux2014_x86_64 py36-none-manylinux2010_x86_64 py36-none-manylinux1_x86_64 py36-none-linux_x86_64 py35-none-manylinux2014_x86_64 py35-none-manylinux2010_x86_64 py35-none-manylinux1_x86_64 py35-none-linux_x86_64 py34-none-manylinux2014_x86_64 py34-none-manylinux2010_x86_64 py34-none-manylinux1_x86_64 py34-none-linux_x86_64 py33-none-manylinux2014_x86_64 py33-none-manylinux2010_x86_64 py33-none-manylinux1_x86_64 py33-none-linux_x86_64 py32-none-manylinux2014_x86_64 py32-none-manylinux2010_x86_64 py32-none-manylinux1_x86_64 py32-none-linux_x86_64 py31-none-manylinux2014_x86_64 py31-none-manylinux2010_x86_64 py31-none-manylinux1_x86_64 py31-none-linux_x86_64 py30-none-manylinux2014_x86_64 py30-none-manylinux2010_x86_64 py30-none-manylinux1_x86_64 py30-none-linux_x86_64 cp38-none-any py38-none-any py3-none-any py37-none-any py36-none-any py35-none-any py34-none-any py33-none-any py32-none-any py31-none-any py30-none-any The list above was generated with the following command: python3 -c 'import pip._internal.pep425tags as tags; print(\"\\n\".join(str(t) for t in tags.get_supported()))' Components should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.",
+ "text": "All Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof ----.whl . -- are called compatibility tags . For example, mpf_component_api is pure Python, so the name of its wheel file is mpf_component_api-0.1-py3-none-any.whl . py3 means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. none means that it does not use the Python ABI. any means it will\nwork on any platform. The acceptable Python version tags are: cp312 (or lower) py312 (or lower) The ONLY acceptable ABI tags are: cp312 abi3 none The acceptable platform tags are: any linux_x86_64 manylinux2010_x86_64 manylinux2014_x86_64 manylinux1_x86_64 manylinux_2_5_x86_64 through manylinux_2_31_x86_64 Components should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.",
"title": "Python Component Build Environment"
},
{
diff --git a/docs/site/sitemap.xml b/docs/site/sitemap.xml
index adcaa9986b91..0ce128ff5036 100644
--- a/docs/site/sitemap.xml
+++ b/docs/site/sitemap.xml
@@ -2,157 +2,157 @@
/index.html
- 2025-03-17
+ 2025-08-19
daily
/Release-Notes/index.html
- 2025-03-17
+ 2025-08-19
daily
/License-And-Distribution/index.html
- 2025-03-17
+ 2025-08-19
daily
/Acknowledgements/index.html
- 2025-03-17
+ 2025-08-19
daily
/Install-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Admin-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/User-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/OpenID-Connect-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Media-Segmentation-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Feed-Forward-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Derivative-Media-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Object-Storage-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Markup-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/TiesDb-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Trigger-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Roll-Up-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Health-Check-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Quality-Selection-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Media-Selectors-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/REST-API/index.html
- 2025-03-17
+ 2025-08-19
daily
/Component-API-Overview/index.html
- 2025-03-17
+ 2025-08-19
daily
/Component-Descriptor-Reference/index.html
- 2025-03-17
+ 2025-08-19
daily
/CPP-Batch-Component-API/index.html
- 2025-03-17
+ 2025-08-19
daily
/Python-Batch-Component-API/index.html
- 2025-03-17
+ 2025-08-19
daily
/Java-Batch-Component-API/index.html
- 2025-03-17
+ 2025-08-19
daily
/GPU-Support-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Contributor-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Development-Environment-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Node-Guide/index.html
- 2025-03-17
+ 2025-08-19
daily
/Workflow-Manager-Architecture/index.html
- 2025-03-17
+ 2025-08-19
daily
/CPP-Streaming-Component-API/index.html
- 2025-03-17
+ 2025-08-19
daily
\ No newline at end of file
From 6472e2a2f662c68d63a70212af480cf943798a3a Mon Sep 17 00:00:00 2001
From: Brian Rosenberg
Date: Tue, 19 Aug 2025 09:28:28 -0400
Subject: [PATCH 3/3] Update compatibility tags section
---
docs/docs/Python-Batch-Component-API.md | 6 ++++--
docs/site/Python-Batch-Component-API/index.html | 5 +++--
docs/site/index.html | 2 +-
docs/site/search/search_index.json | 4 ++--
4 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/docs/docs/Python-Batch-Component-API.md b/docs/docs/Python-Batch-Component-API.md
index 76b88be7c977..7502567d6c54 100644
--- a/docs/docs/Python-Batch-Component-API.md
+++ b/docs/docs/Python-Batch-Component-API.md
@@ -1136,11 +1136,13 @@ The acceptable platform tags are:
- `any`
- `linux_x86_64`
+- `manylinux1_x86_64`
- `manylinux2010_x86_64`
- `manylinux2014_x86_64`
-- `manylinux1_x86_64`
-- `manylinux_2_5_x86_64` through `manylinux_2_31_x86_64`
+- `manylinux_2_5_x86_64` through `manylinux_2_39_x86_64`
+
The full list of compatible tags can be obtained by running: `pip3 debug --verbose`
Components should be supplied as a tar file, which includes not only the component library, but any other libraries or
files needed for execution. This includes all other non-standard libraries used by the component
diff --git a/docs/site/Python-Batch-Component-API/index.html b/docs/site/Python-Batch-Component-API/index.html
index 11f3b5ba4651..c430cd3ff9d1 100644
--- a/docs/site/Python-Batch-Component-API/index.html
+++ b/docs/site/Python-Batch-Component-API/index.html
@@ -1419,11 +1419,12 @@ Python Component Build Environment
any
linux_x86_64
+manylinux1_x86_64
manylinux2010_x86_64
manylinux2014_x86_64
-manylinux1_x86_64
-manylinux_2_5_x86_64 through manylinux_2_31_x86_64
+manylinux_2_5_x86_64 through manylinux_2_39_x86_64
+
The full list of compatible tags can be obtained by running: pip3 debug --verbose
Components should be supplied as a tar file, which includes not only the component library, but any other libraries or
files needed for execution. This includes all other non-standard libraries used by the component
(aside from the standard Python libraries), and any configuration or data files.
diff --git a/docs/site/index.html b/docs/site/index.html
index fa97015940fe..bff2dc149b6d 100644
--- a/docs/site/index.html
+++ b/docs/site/index.html
@@ -404,5 +404,5 @@ Overview
diff --git a/docs/site/search/search_index.json b/docs/site/search/search_index.json
index 3384dca29300..7f8f646aa524 100644
--- a/docs/site/search/search_index.json
+++ b/docs/site/search/search_index.json
@@ -942,7 +942,7 @@
},
{
"location": "/Python-Batch-Component-API/index.html",
- "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\nAPI Overview\n\n\nIn OpenMPF, a \ncomponent\n is a plugin that receives jobs (containing media), processes that media, and returns results.\n\n\nThe OpenMPF Batch Component API currently supports the development of \ndetection components\n, which are used detect\nobjects in image, video, audio, or other (generic) files that reside on disk.\n\n\nUsing this API, detection components can be built to provide:\n\n\n\n\nDetection (Localizing an object)\n\n\nTracking (Localizing an object across multiple frames)\n\n\nClassification (Detecting the type of object and optionally localizing that object)\n\n\nTranscription (Detecting speech and transcribing it into text)\n\n\n\n\nHow Components Integrate into OpenMPF\n\n\nComponents are integrated into OpenMPF through the use of OpenMPF's \nComponent Executable\n.\nDevelopers create component libraries that encapsulate the component detection logic.\nEach instance of the Component Executable loads one of these libraries and uses it to service job requests\nsent by the OpenMPF Workflow Manager (WFM).\n\n\nThe Component Executable:\n\n\n\n\nReceives and parses job requests from the WFM\n\n\nInvokes methods on the component library to obtain detection results\n\n\nPopulates and sends the respective responses to the WFM\n\n\n\n\nThe basic pseudocode for the Component Executable is as follows:\n\n\ncomponent_cls = locate_component_class()\ncomponent = component_cls()\n\nwhile True:\n job = receive_job()\n\n if is_image_job(job) and hasattr(component, 'get_detections_from_image'):\n detections = component.get_detections_from_image(job)\n send_job_response(detections)\n\n elif is_video_job(job) and hasattr(component, 'get_detections_from_video'):\n detections = 
component.get_detections_from_video(job)\n send_job_response(detections)\n\n elif is_audio_job(job) and hasattr(component, 'get_detections_from_audio'):\n detections = component.get_detections_from_audio(job)\n send_job_response(detections)\n\n elif is_generic_job(job) and hasattr(component, 'get_detections_from_generic'):\n detections = component.get_detections_from_generic(job)\n send_job_response(detections)\n\n\n\nEach instance of a Component Executable runs as a separate process.\n\n\nThe Component Executable receives and parses requests from the WFM, invokes methods on the Component Logic to get\ndetection objects, and subsequently populates responses with the component output and sends them to the WFM.\n\n\nA component developer implements a detection component by creating a class that defines one or more of the\nget_detections_from_* methods. See the \nAPI Specification\n for more information.\n\n\nThe figures below present high-level component diagrams of the Python Batch Component API.\nThis figure shows the basic structure:\n\n\n\n\nThe Node Manager is only used in a non-Docker deployment. In a Docker deployment the Component Executor is started by the Docker container itself.\n\n\nThe Component Executor determines that it is running a Python component so it creates an instance of the\n\nPythonComponentHandle\n\nclass. The \nPythonComponentHandle\n class creates an instance of the component class and calls one of the\n\nget_detections_from_*\n methods on the component instance. The example\nabove is an image component, so \nPythonComponentHandle\n calls \nExampleImageFaceDetection.get_detections_from_image\n\non the component instance. The component instance creates an instance of\n\nmpf_component_util.ImageReader\n to access the image. 
Components that support video\nwould implement \nget_detections_from_video\n and use\n\nmpf_component_util.VideoCapture\n instead.\n\n\nThis figure show the structure when the mixin classes are used:\n\n\n\n\nThe figure above shows a video component, \nExampleVideoFaceDetection\n, that extends the\n\nmpf_component_util.VideoCaptureMixin\n class. \nPythonComponentHandle\n will\ncall \nget_detections_from_video\n on an instance of \nExampleVideoFaceDetection\n. \nExampleVideoFaceDetection\n does not\nimplement \nget_detections_from_video\n, so the implementation inherited from \nmpf_component_util.VideoCaptureMixin\n\ngets called. \nmpf_component_util.VideoCaptureMixin.get_detections_from_video\n creates an instance of\n\nmpf_component_util.VideoCapture\n and calls\n\nExampleVideoFaceDetection.get_detections_from_video_capture\n, passing in the \nmpf_component_util.VideoCapture\n it\njust created. \nExampleVideoFaceDetection.get_detections_from_video_capture\n is where the component reads the video\nusing the passed-in \nmpf_component_util.VideoCapture\n and attempts to find detections. Components that support images\nwould extend \nmpf_component_util.ImageReaderMixin\n, implement\n\nget_detections_from_image_reader\n, and access the image using the passed-in\n\nmpf_component_util.ImageReader\n.\n\n\nDuring component registration a \nvirtualenv\n is created for each component.\nThe virtualenv has access to the built-in Python libraries, but does not have access to any third party packages\nthat might be installed on the system. When creating the virtualenv for a setuptools-based component the only packages\nthat get installed are the component itself and any dependencies specified in the setup.cfg\nfile (including their transitive dependencies). When creating the virtualenv for a basic Python component the only\npackage that gets installed is \nmpf_component_api\n. \nmpf_component_api\n is the package containing the job classes\n(e.g. 
\nmpf_component_api.ImageJob\n,\n\nmpf_component_api.VideoJob\n) and detection result classes\n(e.g. \nmpf_component_api.ImageLocation\n,\n\nmpf_component_api.VideoTrack\n).\n\n\nHow to Create a Python Component\n\n\nThere are two types of Python components that are supported, setuptools-based components and basic Python components.\nBasic Python components are quicker to set up, but have no built-in support for dependency management.\nAll dependencies must be handled by the developer. Setuptools-based components are recommended since they use\nsetuptools and pip for dependency management.\n\n\nEither way, the end goal is to create a Docker image. This document describes the steps for developing a component\noutside of Docker. Many developers prefer to do that first and then focus on building and running their component\nwithin Docker after they are confident it works in a local environment. Alternatively, some developers feel confident\ndeveloping their component entirely within Docker. When you're ready for the Docker steps, refer to the\n\nREADME\n.\n\n\nGet openmpf-python-component-sdk\n\n\nIn order to create a Python component you will need to clone the\n\nopenmpf-python-component-sdk repository\n if you don't\nalready have it. While not technically required, it is recommended to also clone the\n\nopenmpf-build-tools repository\n.\nThe rest of the steps assume you cloned openmpf-python-component-sdk to\n\n~/openmpf-projects/openmpf-python-component-sdk\n. 
The rest of the steps also assume that if you cloned the\nopenmpf-build-tools repository, you cloned it to \n~/openmpf-projects/openmpf-build-tools\n.\n\n\nSetup Python Component Libraries\n\n\nThe component packaging steps require that wheel files for \nmpf_component_api\n, \nmpf_component_util\n, and\ntheir dependencies are available in the \n~/mpf-sdk-install/python/wheelhouse\n directory.\n\n\nIf you have openmpf-build-tools, then you can run:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk\n\n\n\nTo setup the libraries manually you can run:\n\n\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/api\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/component_util\n\n\n\nHow to Create a Setuptools-based Python Component\n\n\nIn this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json\n\n\n\n2. 
Create pyproject.toml file in project's top-level directory:\n\n\npyproject.toml\n should contain the following content:\n\n\n[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n\n\n3. Create setup.cfg file in project's top-level directory:\n\n\nExample of a minimal setup.cfg file:\n\n\n[metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/*\n\n\n\nThe \nname\n parameter defines the distribution name. Typically the distribution name matches the component name.\n\n\nAny dependencies that component requires should be listed in the \ninstall_requires\n field.\n\n\nThe Component Executor looks in the \nentry_points\n element and uses the \nmpf.exported_component\n field to determine\nthe component class. The right hand side of \ncomponent =\n should be the dotted module name, followed by a \n:\n,\nfollowed by the name of the class. The general pattern is\n\n'mpf.exported_component': 'component = .:'\n. In the above example,\n\nMyComponent\n is the class name. The module is listed as \nmy_component.my_component\n because the \nmy_component\n\npackage contains the \nmy_component.py\n file and the \nmy_component.py\n file contains the \nMyComponent\n class.\n\n\nThe \n[options.package_data]\n section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed.\n\n\n4. Create descriptor.json file in MyComponent/plugin-files/descriptor:\n\n\nThe \nbatchLibrary\n field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \n\"batchLibrary\" : \"MyComponent\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n5. 
Implement your component class:\n\n\nBelow is an example of the structure of a simple component. This component extends\n\nmpf_component_util.VideoCaptureMixin\n to simplify the use of\n\nmpf_component_util.VideoCapture\n. You would replace the call to\n\nrun_detection_algorithm_on_frame\n with your component-specific logic.\n\n\nimport logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track\n\n\n\n6. Optional: Add prebuilt wheel files if not available on PyPi:\n\n\nIf your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's \nplugin-files/wheelhouse\n directory.\nThe prebuilt library names must be listed in your \nsetup.cfg\n file's \ninstall_requires\n field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's \nplugin-files/wheelhouse\n directory.\n\n\n7. 
Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nMyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following commands:\n\n\nmkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n8. Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nHow to Create a Basic Python Component\n\n\nIn this example we create a basic Python component that supports video. An example of a basic Python component can be\nfound\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/descriptor\ntouch MyComponent/descriptor/descriptor.json\ntouch MyComponent/my_component.py\n\n\n\n2. 
Create descriptor.json file in MyComponent/descriptor:\n\n\nThe \nbatchLibrary\n field should be the full path to the Python file containing your component class.\nIn this example the field should be: \n\"batchLibrary\" : \"${MPF_HOME}/plugins/MyComponent/my_component.py\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n3. Implement your component class:\n\n\nBelow is an example of the structure of a simple component that does not use\n\nmpf_component_util.VideoCaptureMixin\n. You would replace the call to\n\nrun_detection_algorithm\n with your component-specific logic.\n\n\nimport logging\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n return run_detection_algorithm(video_job)\n\nEXPORT_MPF_COMPONENT = MyComponent\n\n\n\nThe Component Executor looks for a module-level variable named \nEXPORT_MPF_COMPONENT\n to specify which class\nis the component.\n\n\n4. Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following command:\n\n\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n5. 
Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nAPI Specification\n\n\nAn OpenMPF Python component is a class that defines one or more of the get_detections_from_* methods.\n\n\ncomponent.get_detections_from_* methods\n\n\nAll get_detections_from_* methods are invoked through an instance of the component class. The only parameter passed\nin is an appropriate job object (e.g. \nmpf_component_api.ImageJob\n, \nmpf_component_api.VideoJob\n). Since the methods\nare invoked through an instance, instance methods and class methods end up with two arguments, the first is either the\ninstance or the class, respectively. All get_detections_from_* methods can be implemented either as an instance method,\na static method, or a class method.\nFor example:\n\n\ninstance method:\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nstatic method:\n\n\nclass MyComponent:\n @staticmethod\n def get_detections_from_image(image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nclass method:\n\n\nclass MyComponent:\n @classmethod\n def get_detections_from_image(cls, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nAll get_detections_from_* methods must return an iterable of the appropriate detection type\n(e.g. \nmpf_component_api.ImageLocation\n, \nmpf_component_api.VideoTrack\n). 
The return value is normally a list or generator,\nbut any iterable can be used.\n\n\nImage API\n\n\ncomponent.get_detections_from_image(image_job)\n\n\nUsed to detect objects in an image file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nget_detections_from_image\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nimage_job\n\n\nmpf_component_api.ImageJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.ImageLocation\n\n\n\n\nmpf_component_api.ImageJob\n\n\nClass containing data used for detection of objects in an image file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.jpg\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of the image in pixels\n\n \nFRAME_HEIGHT\n : the height of the image in pixels\n\n \n\n May include the following key-value pairs:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \nHORIZONTAL_FLIP\n : true if the image is mirrored across the Y-axis, otherwise false\n\n \nEXIF_ORIENTATION\n : the standard EXIF orientation tag; a value between 1 and 8\n\n \n\n \n\n \n\n \n\n \nfeed_forward_location\n\n \nNone\n or \nmpf_component_api.ImageLocation\n\n \nAn \nmpf_component_api.ImageLocation\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. 
Docker container).\n\n\nmpf_component_api.ImageLocation\n\n\nClass used to store the location of detected objects in a image file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, x_left_upper, y_left_upper, width, height, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nx_left_upper\n\n\nint\n\n\nUpper left X coordinate of the detected object.\n\n\n\n\n\n\ny_left_upper\n\n\nint\n\n\nUpper left Y coordinate of the detected object.\n\n\n\n\n\n\nwidth\n\n\nint\n\n\nThe width of the detected object.\n\n\n\n\n\n\nheight\n\n\nint\n\n\nThe height of the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nSee here for information about rotation and horizontal flipping.\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\nmpf_component_api.ImageLocation(0, 0, 100, 100, 1.0, {'CLASSIFICATION': 'backpack'})\n\n\n\nmpf_component_util.ImageReader\n\n\nmpf_component_util.ImageReader\n is a utility class for accessing images. It is the image equivalent to\n\nmpf_component_util.VideoCapture\n. Like \nmpf_component_util.VideoCapture\n,\nit may modify the read-in frame data based on job_properties. 
From the point of view of someone using\n\nmpf_component_util.ImageReader\n, these modifications are mostly transparent. \nmpf_component_util.ImageReader\n makes\nit look like you are reading the original image file as though it has already been rotated, flipped, cropped, etc.\n\n\nOne issue with this approach is that the detection bounding boxes will be relative to the\nmodified frame data, not the original. To make the detections relative to the original image\nthe \nmpf_component_util.ImageReader.reverse_transform(image_location)\n method must be called on each\n\nmpf_component_api.ImageLocation\n. Since the use of \nmpf_component_util.ImageReader\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.ImageReader\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_image(image_job):\n image_reader = mpf_component_util.ImageReader(image_job)\n image = image_reader.get_image()\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_image_locations = run_component_specific_algorithm(image)\n for result in result_image_locations:\n image_reader.reverse_transform(result)\n yield result\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.ImageReaderMixin\n for a more concise way to use\n\nmpf_component_util.ImageReader\n below.\n\n\nmpf_component_util.ImageReaderMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.ImageReader\n.\n\nmpf_component_util.ImageReaderMixin\n takes care of initializing a \nmpf_component_util.ImageReader\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.ImageReaderMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.ImageReaderMixin\n.\n\n\nThe component must implement 
\nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must read the image using the \nmpf_component_util.ImageReader\n\n that is passed in to \nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must NOT implement \nget_detections_from_image(image_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.ImageReader.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.ImageReaderMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.ImageReaderMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_image_reader(image_job, image_reader):\n image = image_reader.get_image()\n\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n return run_component_specific_algorithm(image)\n\n\n\nmpf_component_util.ImageReaderMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\n\n\nVideo API\n\n\ncomponent.get_detections_from_video(video_job)\n\n\nUsed to detect objects in a video file. Prior to being sent to the component, videos are split into logical \"segments\"\nof video data and each segment (containing a range of frames) is assigned to a different job. Components are not\nguaranteed to receive requests in any order. 
For example, the first request processed by a component might receive a\nrequest for frames 300-399 of a Video A, while the next request may cover frames 900-999 of a Video B.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_video(self, video_job):\n return [mpf_component_api.VideoTrack(...), ...]\n\n\n\nget_detections_from_video\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nvideo_job\n\n\nmpf_component_api.VideoJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.VideoTrack\n\n\n\n\nmpf_component_api.VideoJob\n\n\nClass containing data used for detection of objects in a video file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.avi\".\n\n \n\n \n\n \nstart_frame\n\n \nint\n\n \nThe first frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \nstop_frame\n\n \nint\n\n \nThe last frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. 
Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of video in milliseconds\n\n \nFPS\n : frames per second (averaged for variable frame rate video)\n\n \nFRAME_COUNT\n : the number of frames in the video\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of a frame in pixels\n\n \nFRAME_HEIGHT\n : the height of a frame in pixels\n\n \nHAS_CONSTANT_FRAME_RATE\n : set to true if the video has a constant frame rate; otherwise, omitted or set to false if the video has variable frame rate or the type of frame rate cannot be determined\n\n \n\n May include the following key-value pair:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.VideoTrack\n\n \nAn \nmpf_component_api.VideoTrack\n from the previous pipeline stage. Provided when feed forward is enabled. 
See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\n\n\nIMPORTANT:\n \nFRAME_INTERVAL\n is a common job property that many components support.\nFor frame intervals greater than 1, the component must look for detections starting with the first\nframe, and then skip frames as specified by the frame interval, until or before it reaches the stop frame.\nFor example, given a start frame of 0, a stop frame of 99, and a frame interval of 2, then the detection component\nmust look for objects in frames numbered 0, 2, 4, 6, ..., 98.\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.VideoTrack\n\n\nClass used to store the location of detected objects in a video file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_frame, stop_frame, confidence=-1.0, frame_locations=None, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_frame\n\n\nint\n\n\nThe first frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nstop_frame\n\n\nint\n\n\nThe last frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. 
If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\nframe_locations\n\n\ndict[int, mpf_component_api.ImageLocation]\n\n\nA dict of individual detections. The key for each entry is the frame number where the detection was generated, and the value is a \nmpf_component_api.ImageLocation\n calculated as if that frame was a still image. Note that a key-value pair is \nnot\n required for every frame between the track start frame and track stop frame.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.VideoTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\ntrack = mpf_component_api.VideoTrack(0, 1)\ntrack.frame_locations[0] = mpf_component_api.ImageLocation(0, 0, 100, 100, 0.75, {'CLASSIFICATION': 'backpack'})\ntrack.frame_locations[1] = mpf_component_api.ImageLocation(10, 10, 110, 110, 0.95, {'CLASSIFICATION': 'backpack'})\ntrack.confidence = max(il.confidence for il in track.frame_locations.itervalues())\n\n\n\nmpf_component_util.VideoCapture\n\n\nmpf_component_util.VideoCapture\n is a utility class for reading videos. \nmpf_component_util.VideoCapture\n works very\nsimilarly to \ncv2.VideoCapture\n, except that it might modify the video frames based on job properties. 
From the point\nof view of someone using \nmpf_component_util.VideoCapture\n, these modifications are mostly transparent.\n\nmpf_component_util.VideoCapture\n makes it look like you are reading the original video file as though it has already\nbeen rotated, flipped, cropped, etc. Also, if frame skipping is enabled, such as by setting the value of the\n\nFRAME_INTERVAL\n job property, it makes it look like you are reading the video as though it never contained the\nskipped frames.\n\n\nOne issue with this approach is that the detection frame numbers and bounding box will be relative to the\nmodified video, not the original. To make the detections relative to the original video\nthe \nmpf_component_util.VideoCapture.reverse_transform(video_track)\n method must be called on each\n\nmpf_component_api.VideoTrack\n. Since the use of \nmpf_component_util.VideoCapture\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.VideoCapture\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n video_capture = mpf_component_util.VideoCapture(video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n video_capture.reverse_transform(track)\n yield track\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.VideoCaptureMixin\n for a more concise way to use\n\nmpf_component_util.VideoCapture\n below.\n\n\nmpf_component_util.VideoCaptureMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.VideoCapture\n.\n\nmpf_component_util.VideoCaptureMixin\n 
takes care of initializing a \nmpf_component_util.VideoCapture\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.VideoCaptureMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.VideoCaptureMixin\n.\n\n\nThe component must implement \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must read the video using the \nmpf_component_util.VideoCapture\n\n that is passed in to \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must NOT implement \nget_detections_from_video(video_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.VideoCapture.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.VideoCaptureMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_video_capture(video_job, video_capture):\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield track\n\n\n\nmpf_component_util.VideoCaptureMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. 
If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\nFor example:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin, mpf_component_util.ImageReaderMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n ...\n\n @staticmethod\n def get_detections_from_image_reader(image_job, image_reader):\n ...\n\n\n\nAudio API\n\n\ncomponent.get_detections_from_audio(audio_job)\n\n\nUsed to detect objects in an audio file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_audio(self, audio_job):\n return [mpf_component_api.AudioTrack(...), ...]\n\n\n\nget_detections_from_audio\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\naudio_job\n\n\nmpf_component_api.AudioJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.AudioTrack\n\n\n\n\nmpf_component_api.AudioJob\n\n\nClass containing data used for detection of objects in an audio file.\nCurrently, audio files are not logically segmented, so a job will contain the entirety of the audio file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. 
For example, \"/opt/mpf/share/remote-media/test-file.mp3\".\n\n \n\n \n\n \nstart_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the beginning of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \nstop_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the end of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of audio file in milliseconds\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.AudioTrack\n\n \nAn \nmpf_component_api.AudioTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). 
It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.AudioTrack\n\n\nClass used to store the location of detected objects in an audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_time, stop_time, confidence, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event started.\n\n\n\n\n\n\nstop_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event stopped.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.AudioTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\nGeneric API\n\n\ncomponent.get_detections_from_generic(generic_job)\n\n\nUsed to detect objects in files that are not video, image, or audio files. 
Such files are of the UNKNOWN type and\nhandled generically.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_generic(self, generic_job):\n return [mpf_component_api.GenericTrack(...), ...]\n\n\n\nget_detections_from_generic\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\ngeneric_job\n\n\nmpf_component_api.GenericJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.GenericTrack\n\n\n\n\nmpf_component_api.GenericJob\n\n\nClass containing data used for detection of objects in a file that isn't a video, image, or audio file. The file is not\nlogically segmented, so a job will contain the entirety of the file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.txt\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pair:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.GenericTrack\n\n \nAn \nmpf_component_api.GenericTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.GenericTrack\n\n\nClass used to store the location of detected objects in a file that is not a video, image, or audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. 
For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nHow to Report Errors\n\n\nThe following is an example of how to throw an exception:\n\n\nimport mpf_component_api as mpf\n\n...\nraise mpf.DetectionError.MISSING_PROPERTY.exception(\n 'The REALLY_IMPORTANT property must be provided as a job property.')\n\n\n\nThe Python Batch Component API supports all of the same error types\nlisted \nhere\n for the C++ Batch Component API. Be sure to omit\nthe \nMPF_\n prefix. You can replace the \nMISSING_PROPERTY\n part in the above code with any other error type. When\ngenerating an exception, choose the type that best describes your error.\n\n\nPython Component Build Environment\n\n\nAll Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof \n----.whl\n.\n\n--\n are called\n\ncompatibility tags\n. For example,\n\nmpf_component_api\n is pure Python, so the name of its wheel file is\n\nmpf_component_api-0.1-py3-none-any.whl\n. \npy3\n means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. \nnone\n means that it does not use the Python ABI. 
\nany\n means it will\nwork on any platform.\n\n\nThe acceptable Python version tags are:\n\n\n\n\ncp312\n (or lower)\n\n\npy312\n (or lower)\n\n\n\n\nThe \nONLY\n acceptable ABI tags are:\n\n\n\n\ncp312\n\n\nabi3\n\n\nnone\n\n\n\n\nThe acceptable platform tags are:\n\n\n\n\nany\n\n\nlinux_x86_64\n\n\nmanylinux2010_x86_64\n\n\nmanylinux2014_x86_64\n\n\nmanylinux1_x86_64\n\n\nmanylinux_2_5_x86_64\n through \nmanylinux_2_31_x86_64\n\n\n\n\nComponents should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.\n\n\nComponent Development Best Practices\n\n\nSingle-threaded Operation\n\n\nImplementations are encouraged to operate in single-threaded mode. OpenMPF will parallelize components through\nmultiple instantiations of the component, each running as a separate service.\n\n\nStateless Behavior\n\n\nOpenMPF components should be stateless in operation and give identical output for a provided input\n(i.e. when processing the same job).\n\n\nLogging\n\n\nIt is recommended that components use Python's built-in\n\nlogging\n module.\n The component should\n\nimport logging\n and call \nlogging.getLogger('')\n to get a logger instance.\nThe component should not configure logging itself. The Component Executor will configure the\n\nlogging\n module for the component. The logger will write log messages to standard error and\n\n${MPF_LOG_PATH}/${THIS_MPF_NODE}/log/.log\n. Note that multiple instances of the\nsame component can log to the same file. Also, logging content can span multiple lines.\n\n\nThe following log levels are supported: \nFATAL, ERROR, WARN, INFO, DEBUG\n.\nThe \nLOG_LEVEL\n environment variable can be set to one of the log levels to change the logging\nverbosity. 
When \nLOG_LEVEL\n is absent, \nINFO\n is used.\n\n\nThe format of the log messages is:\n\n\nDATE TIME LEVEL [SOURCE_FILE:LINE_NUMBER] - MESSAGE\n\n\n\nFor example:\n\n\n2018-05-03 14:41:11,703 INFO [test_component.py:44] - Logged message",
+ "text": "NOTICE:\n This software (or technical data) was produced for the U.S. Government under contract, and is subject to the\nRights in Data-General Clause 52.227-14, Alt. IV (DEC 2007). Copyright 2024 The MITRE Corporation. All Rights Reserved.\n\n\nAPI Overview\n\n\nIn OpenMPF, a \ncomponent\n is a plugin that receives jobs (containing media), processes that media, and returns results.\n\n\nThe OpenMPF Batch Component API currently supports the development of \ndetection components\n, which are used to detect\nobjects in image, video, audio, or other (generic) files that reside on disk.\n\n\nUsing this API, detection components can be built to provide:\n\n\n\n\nDetection (Localizing an object)\n\n\nTracking (Localizing an object across multiple frames)\n\n\nClassification (Detecting the type of object and optionally localizing that object)\n\n\nTranscription (Detecting speech and transcribing it into text)\n\n\n\n\nHow Components Integrate into OpenMPF\n\n\nComponents are integrated into OpenMPF through the use of OpenMPF's \nComponent Executable\n.\nDevelopers create component libraries that encapsulate the component detection logic.\nEach instance of the Component Executable loads one of these libraries and uses it to service job requests\nsent by the OpenMPF Workflow Manager (WFM).\n\n\nThe Component Executable:\n\n\n\n\nReceives and parses job requests from the WFM\n\n\nInvokes methods on the component library to obtain detection results\n\n\nPopulates and sends the respective responses to the WFM\n\n\n\n\nThe basic pseudocode for the Component Executable is as follows:\n\n\ncomponent_cls = locate_component_class()\ncomponent = component_cls()\n\nwhile True:\n job = receive_job()\n\n if is_image_job(job) and hasattr(component, 'get_detections_from_image'):\n detections = component.get_detections_from_image(job)\n send_job_response(detections)\n\n elif is_video_job(job) and hasattr(component, 'get_detections_from_video'):\n detections = 
component.get_detections_from_video(job)\n send_job_response(detections)\n\n elif is_audio_job(job) and hasattr(component, 'get_detections_from_audio'):\n detections = component.get_detections_from_audio(job)\n send_job_response(detections)\n\n elif is_generic_job(job) and hasattr(component, 'get_detections_from_generic'):\n detections = component.get_detections_from_generic(job)\n send_job_response(detections)\n\n\n\nEach instance of a Component Executable runs as a separate process.\n\n\nThe Component Executable receives and parses requests from the WFM, invokes methods on the Component Logic to get\ndetection objects, and subsequently populates responses with the component output and sends them to the WFM.\n\n\nA component developer implements a detection component by creating a class that defines one or more of the\nget_detections_from_* methods. See the \nAPI Specification\n for more information.\n\n\nThe figures below present high-level component diagrams of the Python Batch Component API.\nThis figure shows the basic structure:\n\n\n\n\nThe Node Manager is only used in a non-Docker deployment. In a Docker deployment the Component Executor is started by the Docker container itself.\n\n\nThe Component Executor determines that it is running a Python component so it creates an instance of the\n\nPythonComponentHandle\n\nclass. The \nPythonComponentHandle\n class creates an instance of the component class and calls one of the\n\nget_detections_from_*\n methods on the component instance. The example\nabove is an image component, so \nPythonComponentHandle\n calls \nExampleImageFaceDetection.get_detections_from_image\n\non the component instance. The component instance creates an instance of\n\nmpf_component_util.ImageReader\n to access the image. 
Components that support video\nwould implement \nget_detections_from_video\n and use\n\nmpf_component_util.VideoCapture\n instead.\n\n\nThis figure shows the structure when the mixin classes are used:\n\n\n\n\nThe figure above shows a video component, \nExampleVideoFaceDetection\n, that extends the\n\nmpf_component_util.VideoCaptureMixin\n class. \nPythonComponentHandle\n will\ncall \nget_detections_from_video\n on an instance of \nExampleVideoFaceDetection\n. \nExampleVideoFaceDetection\n does not\nimplement \nget_detections_from_video\n, so the implementation inherited from \nmpf_component_util.VideoCaptureMixin\n\ngets called. \nmpf_component_util.VideoCaptureMixin.get_detections_from_video\n creates an instance of\n\nmpf_component_util.VideoCapture\n and calls\n\nExampleVideoFaceDetection.get_detections_from_video_capture\n, passing in the \nmpf_component_util.VideoCapture\n it\njust created. \nExampleVideoFaceDetection.get_detections_from_video_capture\n is where the component reads the video\nusing the passed-in \nmpf_component_util.VideoCapture\n and attempts to find detections. Components that support images\nwould extend \nmpf_component_util.ImageReaderMixin\n, implement\n\nget_detections_from_image_reader\n, and access the image using the passed-in\n\nmpf_component_util.ImageReader\n.\n\n\nDuring component registration a \nvirtualenv\n is created for each component.\nThe virtualenv has access to the built-in Python libraries, but does not have access to any third party packages\nthat might be installed on the system. When creating the virtualenv for a setuptools-based component the only packages\nthat get installed are the component itself and any dependencies specified in the setup.cfg\nfile (including their transitive dependencies). When creating the virtualenv for a basic Python component the only\npackage that gets installed is \nmpf_component_api\n. \nmpf_component_api\n is the package containing the job classes\n(e.g. 
\nmpf_component_api.ImageJob\n,\n\nmpf_component_api.VideoJob\n) and detection result classes\n(e.g. \nmpf_component_api.ImageLocation\n,\n\nmpf_component_api.VideoTrack\n).\n\n\nHow to Create a Python Component\n\n\nThere are two types of Python components that are supported, setuptools-based components and basic Python components.\nBasic Python components are quicker to set up, but have no built-in support for dependency management.\nAll dependencies must be handled by the developer. Setuptools-based components are recommended since they use\nsetuptools and pip for dependency management.\n\n\nEither way, the end goal is to create a Docker image. This document describes the steps for developing a component\noutside of Docker. Many developers prefer to do that first and then focus on building and running their component\nwithin Docker after they are confident it works in a local environment. Alternatively, some developers feel confident\ndeveloping their component entirely within Docker. When you're ready for the Docker steps, refer to the\n\nREADME\n.\n\n\nGet openmpf-python-component-sdk\n\n\nIn order to create a Python component you will need to clone the\n\nopenmpf-python-component-sdk repository\n if you don't\nalready have it. While not technically required, it is recommended to also clone the\n\nopenmpf-build-tools repository\n.\nThe rest of the steps assume you cloned openmpf-python-component-sdk to\n\n~/openmpf-projects/openmpf-python-component-sdk\n. 
The rest of the steps also assume that if you cloned the\nopenmpf-build-tools repository, you cloned it to \n~/openmpf-projects/openmpf-build-tools\n.\n\n\nSetup Python Component Libraries\n\n\nThe component packaging steps require that wheel files for \nmpf_component_api\n, \nmpf_component_util\n, and\ntheir dependencies are available in the \n~/mpf-sdk-install/python/wheelhouse\n directory.\n\n\nIf you have openmpf-build-tools, then you can run:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk\n\n\n\nTo setup the libraries manually you can run:\n\n\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/api\npip3 wheel -w ~/mpf-sdk-install/python/wheelhouse ~/openmpf-projects/openmpf-python-component-sdk/detection/component_util\n\n\n\nHow to Create a Setuptools-based Python Component\n\n\nIn this example we create a setuptools-based video component named \"MyComponent\". An example of a setuptools-based\nPython component can be found\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 pyproject.toml\n\u251c\u2500\u2500 setup.cfg\n\u251c\u2500\u2500 component_name\n\u2502 \u251c\u2500\u2500 __init__.py\n\u2502 \u2514\u2500\u2500 component_name.py\n\u2514\u2500\u2500 plugin-files\n \u251c\u2500\u2500 descriptor\n \u2502 \u2514\u2500\u2500 descriptor.json\n \u2514\u2500\u2500 wheelhouse # optional\n \u2514\u2500\u2500 my_prebuilt_lib-0.1-py3-none-any.whl\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/my_component\nmkdir -p MyComponent/plugin-files/descriptor\ntouch MyComponent/pyproject.toml\ntouch MyComponent/setup.cfg\ntouch MyComponent/my_component/__init__.py\ntouch MyComponent/my_component/my_component.py\ntouch MyComponent/plugin-files/descriptor/descriptor.json\n\n\n\n2. 
Create pyproject.toml file in project's top-level directory:\n\n\npyproject.toml\n should contain the following content:\n\n\n[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n\n\n3. Create setup.cfg file in project's top-level directory:\n\n\nExample of a minimal setup.cfg file:\n\n\n[metadata]\nname = MyComponent\nversion = 0.1\n\n[options]\npackages = my_component\ninstall_requires =\n mpf_component_api>=0.1\n mpf_component_util>=0.1\n\n[options.entry_points]\nmpf.exported_component =\n component = my_component.my_component:MyComponent\n\n[options.package_data]\nmy_component=models/*\n\n\n\nThe \nname\n parameter defines the distribution name. Typically the distribution name matches the component name.\n\n\nAny dependencies that component requires should be listed in the \ninstall_requires\n field.\n\n\nThe Component Executor looks in the \nentry_points\n element and uses the \nmpf.exported_component\n field to determine\nthe component class. The right hand side of \ncomponent =\n should be the dotted module name, followed by a \n:\n,\nfollowed by the name of the class. The general pattern is\n\n'mpf.exported_component': 'component = .:'\n. In the above example,\n\nMyComponent\n is the class name. The module is listed as \nmy_component.my_component\n because the \nmy_component\n\npackage contains the \nmy_component.py\n file and the \nmy_component.py\n file contains the \nMyComponent\n class.\n\n\nThe \n[options.package_data]\n section is optional. It should be used when there are non-Python files\nin a package directory that should be included when the component is installed.\n\n\n4. Create descriptor.json file in MyComponent/plugin-files/descriptor:\n\n\nThe \nbatchLibrary\n field should match the distribution name from the setup.cfg file. In this example the\nfield should be: \n\"batchLibrary\" : \"MyComponent\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n5. 
Implement your component class:\n\n\nBelow is an example of the structure of a simple component. This component extends\n\nmpf_component_util.VideoCaptureMixin\n to simplify the use of\n\nmpf_component_util.VideoCapture\n. You would replace the call to\n\nrun_detection_algorithm_on_frame\n with your component-specific logic.\n\n\nimport logging\n\nimport mpf_component_api as mpf\nimport mpf_component_util as mpf_util\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent(mpf_util.VideoCaptureMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n for result_track in run_detection_algorithm_on_frame(frame_index, frame):\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield result_track\n\n\n\n6. Optional: Add prebuilt wheel files if not available on PyPi:\n\n\nIf your component depends on Python libraries that are not available on PyPi, the libraries can be manually added to\nyour project. The prebuilt libraries must be placed in your project's \nplugin-files/wheelhouse\n directory.\nThe prebuilt library names must be listed in your \nsetup.cfg\n file's \ninstall_requires\n field.\nIf any of the prebuilt libraries have transitive dependencies that are not available on PyPi, then those libraries\nmust also be added to your project's \nplugin-files/wheelhouse\n directory.\n\n\n7. 
Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nMyComponent\n\u251c\u2500\u2500 descriptor\n\u2502 \u2514\u2500\u2500 descriptor.json\n\u2514\u2500\u2500 wheelhouse\n \u251c\u2500\u2500 MyComponent-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_api-0.1-py3-none-any.whl\n \u251c\u2500\u2500 mpf_component_util-0.1-py3-none-any.whl\n \u251c\u2500\u2500 numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl\n \u2514\u2500\u2500 opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -psdk ~/openmpf-projects/openmpf-python-component-sdk -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following commands:\n\n\nmkdir -p plugin-packages/MyComponent/wheelhouse\ncp -r MyComponent/plugin-files/* plugin-packages/MyComponent/\npip3 wheel -w plugin-packages/MyComponent/wheelhouse -f ~/mpf-sdk-install/python/wheelhouse -f plugin-packages/MyComponent/wheelhouse ./MyComponent/\ncd plugin-packages\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n8. Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nHow to Create a Basic Python Component\n\n\nIn this example we create a basic Python component that supports video. An example of a basic Python component can be\nfound\n\nhere\n.\n\n\nThis is the recommended project structure:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\n1. Create directory structure:\n\n\nmkdir MyComponent\nmkdir MyComponent/descriptor\ntouch MyComponent/descriptor/descriptor.json\ntouch MyComponent/my_component.py\n\n\n\n2. 
Create descriptor.json file in MyComponent/descriptor:\n\n\nThe \nbatchLibrary\n field should be the full path to the Python file containing your component class.\nIn this example the field should be: \n\"batchLibrary\" : \"${MPF_HOME}/plugins/MyComponent/my_component.py\"\n.\nSee the \nComponent Descriptor Reference\n for details about\nthe descriptor format.\n\n\n3. Implement your component class:\n\n\nBelow is an example of the structure of a simple component that does not use\n\nmpf_component_util.VideoCaptureMixin\n. You would replace the call to\n\nrun_detection_algorithm\n with your component-specific logic.\n\n\nimport logging\n\nlogger = logging.getLogger('MyComponent')\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n logger.info('[%s] Received video job: %s', video_job.job_name, video_job)\n return run_detection_algorithm(video_job)\n\nEXPORT_MPF_COMPONENT = MyComponent\n\n\n\nThe Component Executor looks for a module-level variable named \nEXPORT_MPF_COMPONENT\n to specify which class\nis the component.\n\n\n4. Optional: Create the plugin package for non-Docker deployments:\n\n\nThe directory structure of the .tar.gz file will be:\n\n\nComponentName\n\u251c\u2500\u2500 component_name.py\n\u251c\u2500\u2500 dependency.py\n\u2514\u2500\u2500 descriptor\n \u2514\u2500\u2500 descriptor.json\n\n\n\nTo create the plugin packages you can run the build script as follows:\n\n\n~/openmpf-projects/openmpf-build-tools/build-openmpf-components/build_components.py -c MyComponent\n\n\n\nThe plugin package can also be built manually using the following command:\n\n\ntar -zcf MyComponent.tar.gz MyComponent\n\n\n\n5. 
Create the component Docker image:\n\n\nSee the \nREADME\n.\n\n\nAPI Specification\n\n\nAn OpenMPF Python component is a class that defines one or more of the get_detections_from_* methods.\n\n\ncomponent.get_detections_from_* methods\n\n\nAll get_detections_from_* methods are invoked through an instance of the component class. The only parameter passed\nin is an appropriate job object (e.g. \nmpf_component_api.ImageJob\n, \nmpf_component_api.VideoJob\n). Since the methods\nare invoked through an instance, instance methods and class methods end up with two arguments, the first is either the\ninstance or the class, respectively. All get_detections_from_* methods can be implemented either as an instance method,\na static method, or a class method.\nFor example:\n\n\ninstance method:\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nstatic method:\n\n\nclass MyComponent:\n @staticmethod\n def get_detections_from_image(image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nclass method:\n\n\nclass MyComponent:\n @classmethod\n def get_detections_from_image(cls, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nAll get_detections_from_* methods must return an iterable of the appropriate detection type\n(e.g. \nmpf_component_api.ImageLocation\n, \nmpf_component_api.VideoTrack\n). 
The return value is normally a list or generator,\nbut any iterable can be used.\n\n\nImage API\n\n\ncomponent.get_detections_from_image(image_job)\n\n\nUsed to detect objects in an image file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_image(self, image_job):\n return [mpf_component_api.ImageLocation(...), ...]\n\n\n\nget_detections_from_image\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nimage_job\n\n\nmpf_component_api.ImageJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.ImageLocation\n\n\n\n\nmpf_component_api.ImageJob\n\n\nClass containing data used for detection of objects in an image file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.jpg\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of the image in pixels\n\n \nFRAME_HEIGHT\n : the height of the image in pixels\n\n \n\n May include the following key-value pairs:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \nHORIZONTAL_FLIP\n : true if the image is mirrored across the Y-axis, otherwise false\n\n \nEXIF_ORIENTATION\n : the standard EXIF orientation tag; a value between 1 and 8\n\n \n\n \n\n \n\n \n\n \nfeed_forward_location\n\n \nNone\n or \nmpf_component_api.ImageLocation\n\n \nAn \nmpf_component_api.ImageLocation\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. 
Docker container).\n\n\nmpf_component_api.ImageLocation\n\n\nClass used to store the location of detected objects in a image file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, x_left_upper, y_left_upper, width, height, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nx_left_upper\n\n\nint\n\n\nUpper left X coordinate of the detected object.\n\n\n\n\n\n\ny_left_upper\n\n\nint\n\n\nUpper left Y coordinate of the detected object.\n\n\n\n\n\n\nwidth\n\n\nint\n\n\nThe width of the detected object.\n\n\n\n\n\n\nheight\n\n\nint\n\n\nThe height of the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nSee here for information about rotation and horizontal flipping.\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\nmpf_component_api.ImageLocation(0, 0, 100, 100, 1.0, {'CLASSIFICATION': 'backpack'})\n\n\n\nmpf_component_util.ImageReader\n\n\nmpf_component_util.ImageReader\n is a utility class for accessing images. It is the image equivalent to\n\nmpf_component_util.VideoCapture\n. Like \nmpf_component_util.VideoCapture\n,\nit may modify the read-in frame data based on job_properties. 
From the point of view of someone using\n\nmpf_component_util.ImageReader\n, these modifications are mostly transparent. \nmpf_component_util.ImageReader\n makes\nit look like you are reading the original image file as though it has already been rotated, flipped, cropped, etc.\n\n\nOne issue with this approach is that the detection bounding boxes will be relative to the\nmodified frame data, not the original. To make the detections relative to the original image\nthe \nmpf_component_util.ImageReader.reverse_transform(image_location)\n method must be called on each\n\nmpf_component_api.ImageLocation\n. Since the use of \nmpf_component_util.ImageReader\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.ImageReader\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_image(image_job):\n image_reader = mpf_component_util.ImageReader(image_job)\n image = image_reader.get_image()\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_image_locations = run_component_specific_algorithm(image)\n for result in result_image_locations:\n image_reader.reverse_transform(result)\n yield result\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.ImageReaderMixin\n for a more concise way to use\n\nmpf_component_util.ImageReader\n below.\n\n\nmpf_component_util.ImageReaderMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.ImageReader\n.\n\nmpf_component_util.ImageReaderMixin\n takes care of initializing a \nmpf_component_util.ImageReader\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.ImageReaderMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.ImageReaderMixin\n.\n\n\nThe component must implement 
\nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must read the image using the \nmpf_component_util.ImageReader\n\n that is passed in to \nget_detections_from_image_reader(image_job, image_reader)\n.\n\n\nThe component must NOT implement \nget_detections_from_image(image_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.ImageReader.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.ImageReaderMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.ImageReaderMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_image_reader(image_job, image_reader):\n image = image_reader.get_image()\n\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n return run_component_specific_algorithm(image)\n\n\n\nmpf_component_util.ImageReaderMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\n\n\nVideo API\n\n\ncomponent.get_detections_from_video(video_job)\n\n\nUsed to detect objects in a video file. Prior to being sent to the component, videos are split into logical \"segments\"\nof video data and each segment (containing a range of frames) is assigned to a different job. Components are not\nguaranteed to receive requests in any order. 
For example, the first request processed by a component might receive a\nrequest for frames 300-399 of a Video A, while the next request may cover frames 900-999 of a Video B.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_video(self, video_job):\n return [mpf_component_api.VideoTrack(...), ...]\n\n\n\nget_detections_from_video\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nvideo_job\n\n\nmpf_component_api.VideoJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.VideoTrack\n\n\n\n\nmpf_component_api.VideoJob\n\n\nClass containing data used for detection of objects in a video file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.avi\".\n\n \n\n \n\n \nstart_frame\n\n \nint\n\n \nThe first frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \nstop_frame\n\n \nint\n\n \nThe last frame number (0-based index) of the video that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. 
Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of video in milliseconds\n\n \nFPS\n : frames per second (averaged for variable frame rate video)\n\n \nFRAME_COUNT\n : the number of frames in the video\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \nFRAME_WIDTH\n : the width of a frame in pixels\n\n \nFRAME_HEIGHT\n : the height of a frame in pixels\n\n \nHAS_CONSTANT_FRAME_RATE\n : set to true if the video has a constant frame rate; otherwise, omitted or set to false if the video has variable frame rate or the type of frame rate cannot be determined\n\n \n\n May include the following key-value pair:\n \n\n \nROTATION\n : A floating point value in the interval \n[0.0, 360.0)\n indicating the orientation of the media in degrees in the counter-clockwise direction. In order to view the media in the upright orientation, it must be rotated the given number of degrees in the clockwise direction.\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.VideoTrack\n\n \nAn \nmpf_component_api.VideoTrack\n from the previous pipeline stage. Provided when feed forward is enabled. 
See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\n\n\nIMPORTANT:\n \nFRAME_INTERVAL\n is a common job property that many components support.\nFor frame intervals greater than 1, the component must look for detections starting with the first\nframe, and then skip frames as specified by the frame interval, until or before it reaches the stop frame.\nFor example, given a start frame of 0, a stop frame of 99, and a frame interval of 2, then the detection component\nmust look for objects in frames numbered 0, 2, 4, 6, ..., 98.\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.VideoTrack\n\n\nClass used to store the location of detected objects in a video file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_frame, stop_frame, confidence=-1.0, frame_locations=None, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_frame\n\n\nint\n\n\nThe first frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nstop_frame\n\n\nint\n\n\nThe last frame number (0-based index) that contained the detected object.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. 
If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\nframe_locations\n\n\ndict[int, mpf_component_api.ImageLocation]\n\n\nA dict of individual detections. The key for each entry is the frame number where the detection was generated, and the value is a \nmpf_component_api.ImageLocation\n calculated as if that frame was a still image. Note that a key-value pair is \nnot\n required for every frame between the track start frame and track stop frame.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.VideoTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\n\n\nExample:\n\n\n\n\nA component that performs generic object classification can add an entry to \ndetection_properties\n where the key is\n\nCLASSIFICATION\n and the value is the type of object detected.\n\n\ntrack = mpf_component_api.VideoTrack(0, 1)\ntrack.frame_locations[0] = mpf_component_api.ImageLocation(0, 0, 100, 100, 0.75, {'CLASSIFICATION': 'backpack'})\ntrack.frame_locations[1] = mpf_component_api.ImageLocation(10, 10, 110, 110, 0.95, {'CLASSIFICATION': 'backpack'})\ntrack.confidence = max(il.confidence for il in track.frame_locations.values())\n\n\n\nmpf_component_util.VideoCapture\n\n\nmpf_component_util.VideoCapture\n is a utility class for reading videos. \nmpf_component_util.VideoCapture\n works very\nsimilarly to \ncv2.VideoCapture\n, except that it might modify the video frames based on job properties. 
From the point\nof view of someone using \nmpf_component_util.VideoCapture\n, these modifications are mostly transparent.\n\nmpf_component_util.VideoCapture\n makes it look like you are reading the original video file as though it has already\nbeen rotated, flipped, cropped, etc. Also, if frame skipping is enabled, such as by setting the value of the\n\nFRAME_INTERVAL\n job property, it makes it look like you are reading the video as though it never contained the\nskipped frames.\n\n\nOne issue with this approach is that the detection frame numbers and bounding box will be relative to the\nmodified video, not the original. To make the detections relative to the original video\nthe \nmpf_component_util.VideoCapture.reverse_transform(video_track)\n method must be called on each\n\nmpf_component_api.VideoTrack\n. Since the use of \nmpf_component_util.VideoCapture\n is optional, the framework\ncannot automatically perform the reverse transform for the developer.\n\n\nThe general pattern for using \nmpf_component_util.VideoCapture\n is as follows:\n\n\nclass MyComponent:\n\n @staticmethod\n def get_detections_from_video(video_job):\n video_capture = mpf_component_util.VideoCapture(video_job)\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n video_capture.reverse_transform(track)\n yield track\n\n\n\nAlternatively, see the documentation for \nmpf_component_util.VideoCaptureMixin\n for a more concise way to use\n\nmpf_component_util.VideoCapture\n below.\n\n\nmpf_component_util.VideoCaptureMixin\n\n\nA mixin class that can be used to simplify the usage of \nmpf_component_util.VideoCapture\n.\n\nmpf_component_util.VideoCaptureMixin\n 
takes care of initializing a \nmpf_component_util.VideoCapture\n and\nperforming the reverse transform.\n\n\nThere are some requirements to properly use \nmpf_component_util.VideoCaptureMixin\n:\n\n\n\n\nThe component must extend \nmpf_component_util.VideoCaptureMixin\n.\n\n\nThe component must implement \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must read the video using the \nmpf_component_util.VideoCapture\n\n that is passed in to \nget_detections_from_video_capture(video_job, video_capture)\n.\n\n\nThe component must NOT implement \nget_detections_from_video(video_job)\n.\n\n\nThe component must NOT call \nmpf_component_util.VideoCapture.reverse_transform\n.\n\n\n\n\nThe general pattern for using \nmpf_component_util.VideoCaptureMixin\n is as follows:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin):\n\n @staticmethod # Can also be a regular instance method or a class method\n def get_detections_from_video_capture(video_job, video_capture):\n # If frame index is not required, you can just loop over video_capture directly\n for frame_index, frame in enumerate(video_capture):\n # run_component_specific_algorithm is a placeholder for this example.\n # Replace run_component_specific_algorithm with your component's detection logic\n result_tracks = run_component_specific_algorithm(frame_index, frame)\n for track in result_tracks:\n # Alternatively, while iterating through the video, add tracks to a list. When done, return that list.\n yield track\n\n\n\nmpf_component_util.VideoCaptureMixin\n is a mixin class so it is designed in a way that does not prevent the subclass\nfrom extending other classes. 
If a component supports both videos and images, and it uses\n\nmpf_component_util.VideoCaptureMixin\n, it should also use\n\nmpf_component_util.ImageReaderMixin\n.\nFor example:\n\n\nclass MyComponent(mpf_component_util.VideoCaptureMixin, mpf_component_util.ImageReaderMixin):\n\n @staticmethod\n def get_detections_from_video_capture(video_job, video_capture):\n ...\n\n @staticmethod\n def get_detections_from_image_reader(image_job, image_reader):\n ...\n\n\n\nAudio API\n\n\ncomponent.get_detections_from_audio(audio_job)\n\n\nUsed to detect objects in an audio file.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_audio(self, audio_job):\n return [mpf_component_api.AudioTrack(...), ...]\n\n\n\nget_detections_from_audio\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\naudio_job\n\n\nmpf_component_api.AudioJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.AudioTrack\n\n\n\n\nmpf_component_api.AudioJob\n\n\nClass containing data used for detection of objects in an audio file.\nCurrently, audio files are not logically segmented, so a job will contain the entirety of the audio file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. 
For example, \"/opt/mpf/share/remote-media/test-file.mp3\".\n\n \n\n \n\n \nstart_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the beginning of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \nstop_time\n\n \nint\n\n \nThe time (0-based index, in milliseconds) associated with the end of the segment of the audio file that should be processed to look for detections.\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pairs:\n \n\n \nDURATION\n : length of audio file in milliseconds\n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.AudioTrack\n\n \nAn \nmpf_component_api.AudioTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). 
It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.AudioTrack\n\n\nClass used to store the location of detected objects in an audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, start_time, stop_time, confidence, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nstart_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event started.\n\n\n\n\n\n\nstop_time\n\n\nint\n\n\nThe time (0-based index, in ms) when the audio detection event stopped.\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\n\n\nNOTE:\n Currently, \nmpf_component_api.AudioTrack.detection_properties\n do not show up in the JSON output object or\nare used by the WFM in any way.\n\n\n\n\nGeneric API\n\n\ncomponent.get_detections_from_generic(generic_job)\n\n\nUsed to detect objects in files that are not video, image, or audio files. 
Such files are of the UNKNOWN type and\nhandled generically.\n\n\n\n\nMethod Definition:\n\n\n\n\nclass MyComponent:\n def get_detections_from_generic(self, generic_job):\n return [mpf_component_api.GenericTrack(...), ...]\n\n\n\nget_detections_from_generic\n, like all get_detections_from_* methods, can be implemented either as an instance method,\na static method, or a class method.\n\n\n\n\nParameters:\n\n\n\n\n\n\n\n\n\n\nParameter\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\ngeneric_job\n\n\nmpf_component_api.GenericJob\n\n\nObject containing details about the work to be performed.\n\n\n\n\n\n\n\n\n\n\nReturns: An iterable of \nmpf_component_api.GenericTrack\n\n\n\n\nmpf_component_api.GenericJob\n\n\nClass containing data used for detection of objects in a file that isn't a video, image, or audio file. The file is not\nlogically segmented, so a job will contain the entirety of the file.\n\n\n\n\nMembers:\n\n\n\n\n\n \n\n \n\n \nMember\n\n \nData Type\n\n \nDescription\n\n \n\n \n\n \n\n \n\n \njob_name\n\n \nstr\n\n \nA specific name given to the job by the OpenMPF framework. This value may be used, for example, for logging and debugging purposes.\n\n \n\n \n\n \ndata_uri\n\n \nstr\n\n \nThe URI of the input media file to be processed. Currently, this is a file path. For example, \"/opt/mpf/share/remote-media/test-file.txt\".\n\n \n\n \n\n \njob_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n which represent the property name and the property value. The key corresponds to the property name specified in the component descriptor file described in the \nComponent Descriptor Reference\n. Values are determined when creating a pipeline or when submitting a job.\n \n\n Note: The job_properties dict may not contain the full set of job properties. 
For properties not contained in the dict, the component must use a default value.\n \n\n \n\n \n\n \nmedia_properties\n\n \ndict[str, str]\n\n \n\n Contains a dict with keys and values of type \nstr\n of metadata about the media associated with the job.\n \n\n Includes the following key-value pair:\n \n\n \nMIME_TYPE\n : the MIME type of the media\n\n \n\n \n\n \n\n \n\n \nfeed_forward_track\n\n \nNone\n or \nmpf_component_api.GenericTrack\n\n \nAn \nmpf_component_api.GenericTrack\n from the previous pipeline stage. Provided when feed forward is enabled. See \nFeed Forward Guide\n.\n\n \n\n \n\n\n\n\n\nJob properties can also be set through environment variables prefixed with \nMPF_PROP_\n. This allows\nusers to set job properties in their\n\ndocker-compose files.\n\nThese will take precedence over all other property types (job, algorithm, media, etc). It is not\npossible to change the value of properties set via environment variables at runtime and therefore\nthey should only be used to specify properties that will not change throughout the entire lifetime\nof the service (e.g. Docker container).\n\n\nmpf_component_api.GenericTrack\n\n\nClass used to store the location of detected objects in a file that is not a video, image, or audio file.\n\n\n\n\nConstructor:\n\n\n\n\ndef __init__(self, confidence=-1.0, detection_properties=None):\n ...\n\n\n\n\n\nMembers:\n\n\n\n\n\n\n\n\n\n\nMember\n\n\nData Type\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\nconfidence\n\n\nfloat\n\n\nRepresents the \"quality\" of the detection. The range depends on the detection algorithm. 0.0 is lowest quality. Higher values are higher quality. Using a standard range of [0.0 - 1.0] is advised. If the component is unable to supply a confidence value, it should return -1.0.\n\n\n\n\n\n\ndetection_properties\n\n\ndict[str, str]\n\n\nA dict with keys and values of type \nstr\n containing optional additional information about the detected object. 
For best practice, keys should be in all CAPS.\n\n\n\n\n\n\n\n\nHow to Report Errors\n\n\nThe following is an example of how to throw an exception:\n\n\nimport mpf_component_api as mpf\n\n...\nraise mpf.DetectionError.MISSING_PROPERTY.exception(\n 'The REALLY_IMPORTANT property must be provided as a job property.')\n\n\n\nThe Python Batch Component API supports all of the same error types\nlisted \nhere\n for the C++ Batch Component API. Be sure to omit\nthe \nMPF_\n prefix. You can replace the \nMISSING_PROPERTY\n part in the above code with any other error type. When\ngenerating an exception, choose the type that best describes your error.\n\n\nPython Component Build Environment\n\n\nAll Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof \n----.whl\n.\n\n--\n are called\n\ncompatibility tags\n. For example,\n\nmpf_component_api\n is pure Python, so the name of its wheel file is\n\nmpf_component_api-0.1-py3-none-any.whl\n. \npy3\n means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. \nnone\n means that it does not use the Python ABI. 
\nany\n means it will\nwork on any platform.\n\n\nThe acceptable Python version tags are:\n\n\n\n\ncp312\n (or lower)\n\n\npy312\n (or lower)\n\n\n\n\nThe \nONLY\n acceptable ABI tags are:\n\n\n\n\ncp312\n\n\nabi3\n\n\nnone\n\n\n\n\nThe acceptable platform tags are:\n\n\n\n\nany\n\n\nlinux_x86_64\n\n\nmanylinux1_x86_64\n\n\nmanylinux2010_x86_64\n\n\nmanylinux2014_x86_64\n\n\nmanylinux_2_5_x86_64\n through \nmanylinux_2_39_x86_64\n\n\n\n\nThe full list of compatible tags can be listed by running: \npip3 debug --verbose\n\n\nComponents should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.\n\n\nComponent Development Best Practices\n\n\nSingle-threaded Operation\n\n\nImplementations are encouraged to operate in single-threaded mode. OpenMPF will parallelize components through\nmultiple instantiations of the component, each running as a separate service.\n\n\nStateless Behavior\n\n\nOpenMPF components should be stateless in operation and give identical output for a provided input\n(i.e. when processing the same job).\n\n\nLogging\n\n\nIt is recommended that components use Python's built-in\n\nlogging\n module.\n The component should\n\nimport logging\n and call \nlogging.getLogger('')\n to get a logger instance.\nThe component should not configure logging itself. The Component Executor will configure the\n\nlogging\n module for the component. The logger will write log messages to standard error and\n\n${MPF_LOG_PATH}/${THIS_MPF_NODE}/log/.log\n. Note that multiple instances of the\nsame component can log to the same file. 
Also, logging content can span multiple lines.\n\n\nThe following log levels are supported: \nFATAL, ERROR, WARN, INFO, DEBUG\n.\nThe \nLOG_LEVEL\n environment variable can be set to one of the log levels to change the logging\nverbosity. When \nLOG_LEVEL\n is absent, \nINFO\n is used.\n\n\nThe format of the log messages is:\n\n\nDATE TIME LEVEL [SOURCE_FILE:LINE_NUMBER] - MESSAGE\n\n\n\nFor example:\n\n\n2018-05-03 14:41:11,703 INFO [test_component.py:44] - Logged message",
"title": "Python Batch Component API"
},
{
@@ -1097,7 +1097,7 @@
},
{
"location": "/Python-Batch-Component-API/index.html#python-component-build-environment",
- "text": "All Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof ----.whl . -- are called compatibility tags . For example, mpf_component_api is pure Python, so the name of its wheel file is mpf_component_api-0.1-py3-none-any.whl . py3 means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. none means that it does not use the Python ABI. any means it will\nwork on any platform. The acceptable Python version tags are: cp312 (or lower) py312 (or lower) The ONLY acceptable ABI tags are: cp312 abi3 none The acceptable platform tags are: any linux_x86_64 manylinux2010_x86_64 manylinux2014_x86_64 manylinux1_x86_64 manylinux_2_5_x86_64 through manylinux_2_31_x86_64 Components should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.",
+ "text": "All Python components must work with CPython 3.12. Also, Python components\nmust work with the Linux version that is used by the OpenMPF Component\nExecutable. At this writing, OpenMPF runs on\nUbuntu 20.04 (kernel version 5.13.0-30). Pure Python code should work on any\nOS, but incompatibility issues can arise when using Python libraries that\ninclude compiled extension modules. Python libraries are typically distributed\nas wheel files. The wheel format requires that the file name follows the pattern\nof ----.whl . -- are called compatibility tags . For example, mpf_component_api is pure Python, so the name of its wheel file is mpf_component_api-0.1-py3-none-any.whl . py3 means it will work with any\nPython 3 implementation because it does not use any implementation-specific\nfeatures. none means that it does not use the Python ABI. any means it will\nwork on any platform. The acceptable Python version tags are: cp312 (or lower) py312 (or lower) The ONLY acceptable ABI tags are: cp312 abi3 none The acceptable platform tags are: any linux_x86_64 manylinux1_x86_64 manylinux2010_x86_64 manylinux2014_x86_64 manylinux_2_5_x86_64 through manylinux_2_39_x86_64 The full list of compatible tags can be listed by running: pip3 debug --verbose Components should be supplied as a tar file, which includes not only the component library, but any other libraries or\nfiles needed for execution. This includes all other non-standard libraries used by the component\n(aside from the standard Python libraries), and any configuration or data files.",
"title": "Python Component Build Environment"
},
{