diff --git a/README.md b/README.md
index 7655dbe..8abcc4d 100644
--- a/README.md
+++ b/README.md
@@ -51,22 +51,21 @@ Using this API, detection components can be built to provide:
 
 ## Getting Started
 
 ### Build and Install the Component SDK
-* If not already installed, 
-  [build and install Python 3.8.](https://openmpf.github.io/docs/site/Development-Environment-Guide#python-38)
+* If not already installed,
+  [build and install Python 3.12.](https://openmpf.github.io/docs/site/Development-Environment-Guide#python-312)
 * Install the Python Component API: `pip3 install openmpf-python-component-sdk/detection/api`
 * (Optional) Install the Python Component Utilities: `pip3 install openmpf-python-component-sdk/detection/component_util`
 
 ### Using the Component SDK
-Please read the 
-[Python Batch Component API documentation](https://openmpf.github.io/docs/site/Python-Batch-Component-API)
+Please read the
+[Python Batch Component API documentation](https://openmpf.github.io/docs/site/Python-Batch-Component-API)
 to get started.
 
 ## Project Website
-For more information about OpenMPF, including documentation, guides, and other material, visit our 
+For more information about OpenMPF, including documentation, guides, and other material, visit our
 [website](https://openmpf.github.io/).
 
 ## Project Workboard
 For a latest snapshot of what tasks are being worked on, what's available to pick up, and where the
 project stands as a whole, check out our [workboard](https://github.com/orgs/openmpf/projects/11).
-
diff --git a/detection/component_util/mpf_component_util/models_ini_parser.py b/detection/component_util/mpf_component_util/models_ini_parser.py
index cfc0a5f..6db3eaa 100644
--- a/detection/component_util/mpf_component_util/models_ini_parser.py
+++ b/detection/component_util/mpf_component_util/models_ini_parser.py
@@ -25,15 +25,32 @@
 #############################################################################
 
 import configparser
+import importlib.resources
 import os
-from typing import Any, Collection, Callable, List, Optional
+from importlib.resources.abc import Traversable
+from pathlib import Path
+from typing import Any, Callable, Collection, List, Optional
 
 import mpf_component_api as mpf
 
 
-class ModelsIniParser(object):
-    def __init__(self, plugin_models_dir: str):
-        self._plugin_models_dir = plugin_models_dir
+class ModelsIniParser:
+    def __init__(self, plugin_models_dir: str | Traversable | Path):
+        match plugin_models_dir:
+            case str():
+                self._plugin_models_dir = plugin_models_dir
+            case Path():
+                self._plugin_models_dir = str(plugin_models_dir)
+            case Traversable():
+                # as_file() may extract packaged data to a temporary file; keep
+                # the exit-context on the instance so the path stays valid.
+                self._path_ctx = importlib.resources.as_file(plugin_models_dir)
+                self._plugin_models_dir = str(self._path_ctx.__enter__())
+            case _:
+                raise TypeError(
+                    'plugin_models_dir must be a str, Path, or Traversable, '
+                    f'not {type(plugin_models_dir).__name__}')
+
         self._fields: List[_FieldInfo] = []
 
     def register_field(self, name: str, field_type: Callable[[str], Any] = str) -> 'ModelsIniParser':
diff --git a/detection/examples/PythonOcvComponent/ocv_component/ocv_component.py b/detection/examples/PythonOcvComponent/ocv_component/ocv_component.py
index 9696c6a..da3c704 100644
--- a/detection/examples/PythonOcvComponent/ocv_component/ocv_component.py
+++ b/detection/examples/PythonOcvComponent/ocv_component/ocv_component.py
@@ -25,7 +25,7 @@
 #############################################################################
 
 import logging
-import pkg_resources
+import importlib.resources
 import os
 from typing import Iterable
 
@@ -84,9 +84,7 @@
         return [mpf.VideoTrack(0, last_frame_read, frame_locations=detections)]
 
 
-
-
-ModelSettings = (mpf_util.ModelsIniParser(pkg_resources.resource_filename(__name__, 'models'))
+ModelSettings = (mpf_util.ModelsIniParser(importlib.resources.files(__name__) / 'models')
                 .register_path_field('network')
                 .register_path_field('names')
                 .register_int_field('num_classes')
diff --git a/detection/nlp_text_splitter/README.md b/detection/nlp_text_splitter/README.md
index 46956fa..e8c7d14 100644
--- a/detection/nlp_text_splitter/README.md
+++ b/detection/nlp_text_splitter/README.md
@@ -26,9 +26,9 @@
 this model lacks support handling for Chinese punctuation.
 
 To install this tool users will need to run `./install.sh`. By default this
 will set up a CPU-only PyTorch installation. `./install.sh` requires a C++
 compiler and the Python development headers to be installed. If they are not
 already installed, they can be installed by running
-`apt-get install g++ python3.8-dev`.
+`apt-get install g++ python3.12-dev`.
 
 Please note that several customizations are supported:
diff --git a/detection/nlp_text_splitter/nlp_text_splitter/__init__.py b/detection/nlp_text_splitter/nlp_text_splitter/__init__.py
index f083a5f..3913b9a 100644
--- a/detection/nlp_text_splitter/nlp_text_splitter/__init__.py
+++ b/detection/nlp_text_splitter/nlp_text_splitter/__init__.py
@@ -26,7 +26,8 @@
 
 import logging
 import os
-import pkg_resources
+import importlib.resources
+from importlib.resources.abc import Traversable
 
 import spacy
 from wtpsplit import WtP
@@ -40,9 +41,7 @@
 
 DEFAULT_WTP_MODELS = "/opt/wtp/models"
 
 # If we want to package model installation with this utility in the future:
-WTP_MODELS_PATH = pkg_resources.resource_filename(
-    __name__, "models"
-)
+WTP_MODELS_PATH: Traversable = importlib.resources.files(__name__) / 'models'
 
 log = logging.getLogger(__name__)
@@ -110,22 +109,23 @@
         self._model_setting = model_setting
         self._model_name = wtp_model_name
         # Check if model has been downloaded
-        if os.path.exists(os.path.join(WTP_MODELS_PATH, wtp_model_name)):
+        model_resource = WTP_MODELS_PATH / wtp_model_name
+        # Preserve os.path.exists() semantics: a model may be a file or a dir.
+        if model_resource.is_file() or model_resource.is_dir():
             log.info(f"Using downloaded {wtp_model_name} model.")
-            wtp_model_name = os.path.join(WTP_MODELS_PATH, wtp_model_name)
-
+            with importlib.resources.as_file(model_resource) as path:
+                self.wtp_model = WtP(str(path))
         elif os.path.exists(os.path.join(DEFAULT_WTP_MODELS, wtp_model_name)):
             log.info(f"Using downloaded {wtp_model_name} model.")
             wtp_model_name = os.path.join(DEFAULT_WTP_MODELS, wtp_model_name)
-
+            self.wtp_model = WtP(wtp_model_name)
         else:
             log.warning(f"Model {wtp_model_name} not found, "
                         "downloading from hugging face.")
-
-        self.wtp_model = WtP(wtp_model_name)
+            self.wtp_model = WtP(wtp_model_name)
 
         if model_setting != "cpu" and model_setting != "cuda":
             log.warning(f"Invalid setting for WtP runtime {model_setting}. "