diff --git a/docs/docs/answer-modes/3-counting-detectors.md b/docs/docs/answer-modes/3-counting-detectors.md index e245c3b7..7cc1b7bb 100644 --- a/docs/docs/answer-modes/3-counting-detectors.md +++ b/docs/docs/answer-modes/3-counting-detectors.md @@ -3,11 +3,11 @@ Counting detectors are used to count the number of objects in an image. Groundlight's counting detectors also return bounding boxes around the objects they count. ```python notest -from groundlight import ExperimentalApi -gl_exp = ExperimentalApi() +from groundlight import Groundlight +gl = Groundlight() # highlight-start -detector = gl_exp.create_counting_detector( +detector = gl.create_counting_detector( name="car-counter", query="How many cars are in the parking lot?", class_name="car", @@ -32,14 +32,14 @@ Counting Detectors are available on [Business and Enterprise plans](https://www. Now that you have created a counting detector, you can submit an image query to it. ```python notest -from groundlight import ExperimentalApi -gl_exp = ExperimentalApi() +from groundlight import Groundlight +gl = Groundlight() -detector = gl_exp.get_detector_by_name("car-counter") +detector = gl.get_detector_by_name("car-counter") # highlight-start # Count the number of cars in an image -image_query = gl_exp.submit_image_query(detector, "path/to/image.jpg") +image_query = gl.submit_image_query(detector, "path/to/image.jpg") # highlight-end print(f"Counted {image_query.result.count} cars") @@ -99,17 +99,17 @@ When adding a label to a counting detector, if you include ROIs, the number of R the count you are labeling. ```python notest -from groundlight import ExperimentalApi -gl_exp = ExperimentalApi() +from groundlight import Groundlight +gl = Groundlight() # highlight-start # Add a count label with corresponding ROIs to the image query from the previous example. # ROIs are specified as (left, top) and (right, bottom) coordinates, with values # between 0 and 1 representing the percentage of the image width and height. 
-roi1 = gl_exp.create_roi("car", (0.1, 0.2), (0.2, 0.3)) -roi2 = gl_exp.create_roi("car", (0.4, 0.4), (0.5, 0.6)) -roi3 = gl_exp.create_roi("car", (0.6, 0.5), (0.8, 0.9)) +roi1 = gl.create_roi("car", (0.1, 0.2), (0.2, 0.3)) +roi2 = gl.create_roi("car", (0.4, 0.4), (0.5, 0.6)) +roi3 = gl.create_roi("car", (0.6, 0.5), (0.8, 0.9)) rois = [roi1, roi2, roi3] -gl_exp.add_label(image_query, label=len(rois), rois=rois) +gl.add_label(image_query, label=len(rois), rois=rois) # highlight-end ``` \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 6f439de4..59e52b94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ packages = [ {include = "**/*.py", from = "src"}, ] readme = "README.md" -version = "0.22.7" +version = "0.23.0" [tool.poetry.dependencies] # For certifi, use ">=" instead of "^" since it upgrades its "major version" every year, not really following semver diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 9a55a906..a603edd3 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -5,24 +5,32 @@ import warnings from functools import partial from io import BufferedReader, BytesIO -from typing import Any, Callable, List, Optional, Union +from typing import Any, Callable, List, Optional, Tuple, Union from groundlight_openapi_client import Configuration +from groundlight_openapi_client.api.detector_groups_api import DetectorGroupsApi from groundlight_openapi_client.api.detectors_api import DetectorsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.labels_api import LabelsApi from groundlight_openapi_client.api.user_api import UserApi from groundlight_openapi_client.exceptions import NotFoundException, UnauthorizedException from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest +from groundlight_openapi_client.model.count_mode_configuration import CountModeConfiguration from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest +from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest from groundlight_openapi_client.model.label_value_request import LabelValueRequest +from groundlight_openapi_client.model.multi_class_mode_configuration import MultiClassModeConfiguration from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.roi_request import ROIRequest +from groundlight_openapi_client.model.status_enum import StatusEnum from model import ( ROI, + BBoxGeometry, BinaryClassificationResult, Detector, + DetectorGroup, ImageQuery, + ModeEnum, PaginatedDetectorList, PaginatedImageQueryList, ) @@ -57,7 +65,7 @@ class ApiTokenError(GroundlightClientError): pass -class Groundlight: # pylint: disable=too-many-instance-attributes +class Groundlight: # pylint: disable=too-many-instance-attributes,too-many-public-methods """ Client for accessing the Groundlight cloud service. Provides methods to create visual detectors, submit images for analysis, and retrieve predictions. 
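[Editor's note] The documentation changes above migrate the counting workflow from `ExperimentalApi` onto the stable `Groundlight` client that this diff promotes. For reviewers, here is a minimal end-to-end sketch of the flow as the updated page now describes it; the detector name, image path, and ROI coordinates are illustrative values, and it assumes a valid `GROUNDLIGHT_API_TOKEN` in the environment.

```python notest
from groundlight import Groundlight

gl = Groundlight()

# Create a counting detector -- mirrors the docs above
detector = gl.create_counting_detector(
    name="car-counter",  # illustrative name
    query="How many cars are in the parking lot?",
    class_name="car",
)

# Submit an image and read the count from the result
image_query = gl.submit_image_query(detector, "path/to/image.jpg")  # illustrative path
print(f"Counted {image_query.result.count} cars")

# Correct the prediction: one ROI per counted object, with normalized
# (left, top) / (right, bottom) corners in [0, 1]
rois = [
    gl.create_roi("car", (0.1, 0.2), (0.2, 0.3)),
    gl.create_roi("car", (0.4, 0.4), (0.5, 0.6)),
]
gl.add_label(image_query, label=len(rois), rois=rois)  # label must equal len(rois)
```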
@@ -173,6 +181,8 @@ def __init__(
         self.api_client = GroundlightApiClient(self.configuration)
         self.detectors_api = DetectorsApi(self.api_client)
+        self.detector_group_api = DetectorGroupsApi(self.api_client)
+        self.images_api = ImageQueriesApi(self.api_client)
         self.image_queries_api = ImageQueriesApi(self.api_client)
         self.user_api = UserApi(self.api_client)
         self.labels_api = LabelsApi(self.api_client)
@@ -353,18 +363,22 @@ def create_detector(  # noqa: PLR0913
         name: str,
         query: str,
         *,
+        mode: ModeEnum = ModeEnum.BINARY,
         group_name: Optional[str] = None,
         confidence_threshold: Optional[float] = None,
         patience_time: Optional[float] = None,
         pipeline_config: Optional[str] = None,
         metadata: Union[dict, str, None] = None,
+        class_names: Optional[Union[List[str], str]] = None,
     ) -> Detector:
         """
         Create a new Detector with a given name and query.

-        Counting and Multiclass detectors are in Beta, and can be created through the
-        ExperimentalApi via the :meth:`ExperimentalApi.create_counting_detector` and
-        :meth:`ExperimentalApi.create_multiclass_detector` methods.
+        By default, this creates a binary detector; alternate modes can be created by passing the
+        ``mode`` argument.
+
+        Text recognition and bounding box detectors are in Beta, and can be created through the
+        ExperimentalApi via the :meth:`ExperimentalApi.create_text_recognition_detector` and
+        :meth:`ExperimentalApi.create_bounding_box_detector` methods.

         **Example usage**::

@@ -399,6 +413,7 @@ def create_detector(  # noqa: PLR0913
             this should be a yes/no question (e.g. "Is there a person in the image?").
         :param group_name: Optional name of a group to organize related detectors together. If not
             specified, the detector will be placed in the default group.
+        :param mode: The mode of the detector. Defaults to ModeEnum.BINARY.
         :param confidence_threshold: A value between 0.5 and 1 that sets the minimum confidence level required
             for the ML model's predictions. If confidence is below this threshold,
             the query may be sent for human review.
@@ -410,21 +425,57 @@ def create_detector(  # noqa: PLR0913
             the detector (limited to 1KB). This metadata can be used to store additional
             information like location, purpose, or related system IDs. You can retrieve this
             metadata later by calling `get_detector()`.
+        :param class_names: For counting detectors, the single class name (a string) to count; for
+            multi-class detectors, the list of class names to choose between. Not supported for
+            binary detectors.
        :return: The created Detector object
        """
-        detector_creation_input = self._prep_create_detector(
-            name=name,
-            query=query,
-            group_name=group_name,
-            confidence_threshold=confidence_threshold,
-            patience_time=patience_time,
-            pipeline_config=pipeline_config,
-            metadata=metadata,
+        if mode == ModeEnum.BINARY:
+            if class_names is not None:
+                raise ValueError("class_names is not supported for binary detectors")
+            return self.create_binary_detector(
+                name=name,
+                query=query,
+                group_name=group_name,
+                confidence_threshold=confidence_threshold,
+                patience_time=patience_time,
+                pipeline_config=pipeline_config,
+                metadata=metadata,
+            )
+        if mode == ModeEnum.COUNT:
+            if class_names is None:
+                raise ValueError("class_names is required for counting detectors")
+            if isinstance(class_names, list):
+                raise ValueError("class_names must be a single string for counting detectors")
+            return self.create_counting_detector(
+                name=name,
+                query=query,
+                class_name=class_names,
+                group_name=group_name,
+                confidence_threshold=confidence_threshold,
+                patience_time=patience_time,
+                pipeline_config=pipeline_config,
+                metadata=metadata,
+            )
+        if mode == ModeEnum.MULTI_CLASS:
+            if class_names is None:
+                raise ValueError("class_names is required for multi-class detectors")
+            if isinstance(class_names, str):
+                raise ValueError("class_names must be a list for multi-class detectors")
+            return self.create_multiclass_detector(
+                name=name,
+                query=query,
+                class_names=class_names,
+                group_name=group_name,
+                confidence_threshold=confidence_threshold,
+                patience_time=patience_time,
+                pipeline_config=pipeline_config,
+                metadata=metadata,
+            )
+        raise ValueError(
+            f"Unsupported mode: {mode}; check whether your desired mode is only supported in the ExperimentalApi"
        )
-        obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT)
-        return Detector.parse_obj(obj.to_dict())

    def get_or_create_detector(  # noqa: PLR0913
        self,
@@ -1205,3 +1256,385 @@ def update_detector_confidence_threshold(self, detector: Union[str, Detector], c
        self.detectors_api.update_detector(
            detector, patched_detector_request=PatchedDetectorRequest(confidence_threshold=confidence_threshold)
        )
+
+    def get_image(self, iq_id: str) -> bytes:
+        """
+        Get the image associated with the given image query ID.
+
+        **Example usage**::
+
+            gl = Groundlight()
+
+            # Get image from an image query
+            iq = gl.get_image_query("iq_123")
+            image_bytes = gl.get_image(iq.id)
+
+            # Open with PIL - returns RGB order
+            from io import BytesIO
+            from PIL import Image
+            image = Image.open(BytesIO(gl.get_image(iq.id)))  # Returns RGB image
+
+            # Open with numpy via PIL - returns RGB order
+            import numpy as np
+            from io import BytesIO
+            image = np.array(Image.open(BytesIO(gl.get_image(iq.id))))  # Returns RGB array
+
+            # Open with OpenCV - returns BGR order
+            import cv2
+            import numpy as np
+            nparr = np.frombuffer(image_bytes, np.uint8)
+            image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)  # Returns BGR array
+            # To convert to RGB if needed:
+            # image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+        :param iq_id: The ID of the image query to get the image from
+        :return: The image as a byte array that can be used with PIL or other image libraries
+        """
+        # TODO: support taking an ImageQuery object
+        return self.images_api.get_image(iq_id)
+
+    def create_detector_group(self, name: str) -> DetectorGroup:
+        """
+        Creates a detector group with the given name. A detector group allows you to organize
+        related detectors together.
+
+        .. note::
+            You can specify a detector group when creating a detector without the need to create it ahead of time.
+            The group will be created automatically if it doesn't exist.
+
+        **Example usage**::
+
+            gl = Groundlight()
+
+            # Create a group for all door-related detectors
+            door_group = gl.create_detector_group("door-detectors")
+
+            # Later, create detectors in this group
+            door_open_detector = gl.create_detector(
+                name="front-door-open",
+                query="Is the front door open?",
+                group_name=door_group.name
+            )
+
+        :param name: The name of the detector group. This should be descriptive and unique within your organization.
+        :type name: str
+        :return: A DetectorGroup object corresponding to the newly created detector group
+        :rtype: DetectorGroup
+        """
+        return DetectorGroup(**self.detector_group_api.create_detector_group(DetectorGroupRequest(name=name)).to_dict())
+
+    def list_detector_groups(self) -> List[DetectorGroup]:
+        """
+        Gets a list of all detector groups in your account.
+
+        **Example usage**::
+
+            gl = Groundlight()
+
+            # Get all detector groups
+            groups = gl.list_detector_groups()
+
+            # Print information about each group
+            for group in groups:
+                print(f"Group name: {group.name}")
+                print(f"Group ID: {group.id}")
+
+        :return: A list of DetectorGroup objects representing all detector groups in your account
+        """
+        return [DetectorGroup(**det.to_dict()) for det in self.detector_group_api.get_detector_groups()]
+
+    def create_roi(self, label: str, top_left: Tuple[float, float], bottom_right: Tuple[float, float]) -> ROI:
+        """
+        Creates a Region of Interest (ROI) object that can be used to specify areas of interest in images. Certain
+        detectors (such as Count-mode detectors) may emit ROIs as part of their output. Providing an ROI can help
+        improve the accuracy of such detectors.
+
+        .. note::
+            ROI functionality is only available to Pro tier and higher.
+            If you would like to learn more, reach out to us at https://groundlight.ai
+
+        **Example usage**::
+
+            gl = Groundlight()
+
+            # Create an ROI for a door in the image
+            door_roi = gl.create_roi(
+                label="door",
+                top_left=(0.2, 0.3),      # Coordinates are normalized (0-1)
+                bottom_right=(0.4, 0.8)   # Coordinates are normalized (0-1)
+            )
+
+            # Use the ROI when submitting an image query
+            query = gl.submit_image_query(
+                detector="door-detector",
+                image=image_bytes,
+                rois=[door_roi]
+            )
+
+        :param label: A descriptive label for the object or area contained in the ROI
+        :param top_left: Tuple of (x, y) coordinates for the top-left corner, normalized to [0,1]
+        :param bottom_right: Tuple of (x, y) coordinates for the bottom-right corner, normalized to [0,1]
+        :return: An ROI object that can be used in image queries
+        """
+
+        return ROI(
+            label=label,
+            score=1.0,
+            geometry=BBoxGeometry(
+                left=top_left[0],
+                top=top_left[1],
+                right=bottom_right[0],
+                bottom=bottom_right[1],
+                x=(top_left[0] + bottom_right[0]) / 2,
+                y=(top_left[1] + bottom_right[1]) / 2,
+            ),
+        )
+
+    def update_detector_status(self, detector: Union[str, Detector], enabled: bool) -> None:
+        """
+        Updates the status of the given detector. When a detector is disabled (enabled=False),
+        it will not accept or process any new image queries. Existing queries will not be affected.
+ + **Example usage**:: + + gl = Groundlight() + + # Using a detector object + detector = gl.get_detector("det_abc123") + gl.update_detector_status(detector, enabled=False) # Disable the detector + + # Using a detector ID string directly + gl.update_detector_status("det_abc123", enabled=True) # Enable the detector + + :param detector: Either a Detector object or a detector ID string starting with "det_". + The detector whose status should be updated. + :param enabled: Boolean indicating whether the detector should be enabled (True) or + disabled (False). When disabled, the detector will not process new queries. + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + self.detectors_api.update_detector( + detector, + patched_detector_request=PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF")), + ) + + def update_detector_escalation_type(self, detector: Union[str, Detector], escalation_type: str) -> None: + """ + Updates the escalation type of the given detector, controlling whether queries can be + sent to human labelers when ML confidence is low. + + This is particularly useful for controlling costs. When set to "NO_HUMAN_LABELING", + queries will only receive ML predictions, even if confidence is low. + When set to "STANDARD", low-confidence queries may be sent to human labelers for verification. + + **Example usage**:: + + gl = Groundlight() + + # Using a detector object + detector = gl.get_detector("det_abc123") + + # Disable human labeling + gl.update_detector_escalation_type(detector, "NO_HUMAN_LABELING") + + # Re-enable standard human labeling + gl.update_detector_escalation_type("det_abc123", "STANDARD") + + :param detector: Either a Detector object or a detector ID string starting with "det_". + The detector whose escalation type should be updated. + :param escalation_type: The new escalation type setting. Must be one of: + - "STANDARD": Allow human labeling for low-confidence queries + - "NO_HUMAN_LABELING": Never send queries to human labelers + + :return: None + :raises ValueError: If escalation_type is not one of the allowed values + """ + if isinstance(detector, Detector): + detector = detector.id + escalation_type = escalation_type.upper() + if escalation_type not in ["STANDARD", "NO_HUMAN_LABELING"]: + raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'") + self.detectors_api.update_detector( + detector, + patched_detector_request=PatchedDetectorRequest(escalation_type=escalation_type), + ) + + def create_counting_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals + self, + name: str, + query: str, + class_name: str, + *, + max_count: Optional[int] = None, + group_name: Optional[str] = None, + confidence_threshold: Optional[float] = None, + patience_time: Optional[float] = None, + pipeline_config: Optional[str] = None, + metadata: Union[dict, str, None] = None, + ) -> Detector: + """ + Creates a counting detector that can count objects in images up to a specified maximum count. 
+ + **Example usage**:: + + gl = Groundlight() + + # Create a detector that counts people up to 5 + detector = gl.create_counting_detector( + name="people_counter", + query="How many people are in the image?", + class_name="person", + max_count=5, + confidence_threshold=0.9, + patience_time=30.0 + ) + + # Use the detector to count people in an image + image_query = gl.ask_ml(detector, "path/to/image.jpg") + print(f"Counted {image_query.result.count} people") + print(f"Confidence: {image_query.result.confidence}") + + :param name: A short, descriptive name for the detector. + :param query: A question about the count of an object in the image. + :param class_name: The class name of the object to count. + :param max_count: Maximum number of objects to count (default: 10) + :param group_name: Optional name of a group to organize related detectors together. + :param confidence_threshold: A value that sets the minimum confidence level required for the ML model's + predictions. If confidence is below this threshold, the query may be sent for human review. + :param patience_time: The maximum time in seconds that Groundlight will attempt to generate a + confident prediction before falling back to human review. Defaults to 30 seconds. + :param pipeline_config: Advanced usage only. Configuration string needed to instantiate a specific + prediction pipeline for this detector. + :param metadata: A dictionary or JSON string containing custom key/value pairs to associate with + the detector (limited to 1KB). This metadata can be used to store additional + information like location, purpose, or related system IDs. You can retrieve this + metadata later by calling `get_detector()`. + + :return: The created Detector object + """ + + detector_creation_input = self._prep_create_detector( + name=name, + query=query, + group_name=group_name, + confidence_threshold=confidence_threshold, + patience_time=patience_time, + pipeline_config=pipeline_config, + metadata=metadata, + ) + detector_creation_input.mode = ModeEnum.COUNT + + if max_count is None: + mode_config = CountModeConfiguration(class_name=class_name) + else: + mode_config = CountModeConfiguration(class_name=class_name, max_count=max_count) + + detector_creation_input.mode_configuration = mode_config + obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT) + return Detector.parse_obj(obj.to_dict()) + + def create_binary_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals + self, + name: str, + query: str, + *, + group_name: Optional[str] = None, + confidence_threshold: Optional[float] = None, + patience_time: Optional[float] = None, + pipeline_config: Optional[str] = None, + metadata: Union[dict, str, None] = None, + ) -> Detector: + """ + Creates a binary detector with the given name and query. 
+ + **Example usage**:: + + gl = Groundlight() + + # Create a binary detector for a door + detector = gl.create_binary_detector( + name="door_detector", + query="Is there a door in the image?", + confidence_threshold=0.9, + patience_time=30.0 + ) + + # Use the detector to classify a door + image_query = gl.ask_ml(detector, "path/to/image.jpg") + """ + detector_creation_input = self._prep_create_detector( + name=name, + query=query, + group_name=group_name, + confidence_threshold=confidence_threshold, + patience_time=patience_time, + pipeline_config=pipeline_config, + metadata=metadata, + ) + obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT) + return Detector.parse_obj(obj.to_dict()) + + def create_multiclass_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals + self, + name: str, + query: str, + class_names: List[str], + *, + group_name: Optional[str] = None, + confidence_threshold: Optional[float] = None, + patience_time: Optional[float] = None, + pipeline_config: Optional[str] = None, + metadata: Union[dict, str, None] = None, + ) -> Detector: + """ + Creates a multiclass detector with the given name and query. + + **Example usage**:: + + gl = Groundlight() + + detector = gl.create_multiclass_detector( + name="Traffic Light Detector", + query="What color is the traffic light?", + class_names=["Red", "Yellow", "Green"] + ) + + # Use the detector to classify a traffic light + image_query = gl.ask_ml(detector, "path/to/image.jpg") + print(f"Traffic light is {image_query.result.label}") + print(f"Confidence: {image_query.result.confidence}") + + :param name: A short, descriptive name for the detector. + :param query: A question about classifying objects in the image. + :param class_names: List of possible class labels for classification. + :param group_name: Optional name of a group to organize related detectors together. + :param confidence_threshold: A value between 1/num_classes and 1 that sets the minimum confidence level required + for the ML model's predictions. If confidence is below this threshold, + the query may be sent for human review. + :param patience_time: The maximum time in seconds that Groundlight will attempt to generate a + confident prediction before falling back to human review. Defaults to 30 seconds. + :param pipeline_config: Advanced usage only. Configuration string needed to instantiate a specific + prediction pipeline for this detector. + :param metadata: A dictionary or JSON string containing custom key/value pairs to associate with + the detector (limited to 1KB). This metadata can be used to store additional + information like location, purpose, or related system IDs. You can retrieve this + metadata later by calling `get_detector()`. 
+ + :return: The created Detector object + """ + + detector_creation_input = self._prep_create_detector( + name=name, + query=query, + group_name=group_name, + confidence_threshold=confidence_threshold, + patience_time=patience_time, + pipeline_config=pipeline_config, + metadata=metadata, + ) + detector_creation_input.mode = ModeEnum.MULTI_CLASS + mode_config = MultiClassModeConfiguration(class_names=class_names) + detector_creation_input.mode_configuration = mode_config + obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT) + return Detector.parse_obj(obj.to_dict()) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 3044d934..8ca92fd8 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -10,36 +10,28 @@ import json from io import BufferedReader, BytesIO from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Union import requests from groundlight_openapi_client.api.actions_api import ActionsApi from groundlight_openapi_client.api.detector_groups_api import DetectorGroupsApi from groundlight_openapi_client.api.detector_reset_api import DetectorResetApi from groundlight_openapi_client.api.edge_api import EdgeApi -from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.notes_api import NotesApi from groundlight_openapi_client.model.action_request import ActionRequest from groundlight_openapi_client.model.bounding_box_mode_configuration import BoundingBoxModeConfiguration from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition_request import ConditionRequest -from groundlight_openapi_client.model.count_mode_configuration import CountModeConfiguration -from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest -from groundlight_openapi_client.model.multi_class_mode_configuration import MultiClassModeConfiguration from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.payload_template_request import PayloadTemplateRequest from groundlight_openapi_client.model.rule_request import RuleRequest -from groundlight_openapi_client.model.status_enum import StatusEnum from groundlight_openapi_client.model.text_mode_configuration import TextModeConfiguration from groundlight_openapi_client.model.webhook_action_request import WebhookActionRequest from model import ( - ROI, Action, ActionList, - BBoxGeometry, Condition, Detector, - DetectorGroup, EdgeModelInfo, ModeEnum, PaginatedRuleList, @@ -107,7 +99,6 @@ def __init__( """ super().__init__(endpoint=endpoint, api_token=api_token, disable_tls_verification=disable_tls_verification) self.actions_api = ActionsApi(self.api_client) - self.images_api = ImageQueriesApi(self.api_client) self.notes_api = NotesApi(self.api_client) self.detector_group_api = DetectorGroupsApi(self.api_client) self.detector_reset_api = DetectorResetApi(self.api_client) @@ -515,41 +506,6 @@ def delete_all_rules(self, detector: Union[None, str, Detector] = None) -> int: self.delete_rule(rule_id) return num_rules - def get_image(self, iq_id: str) -> bytes: - """ - Get the image associated with the given image query ID. 
- - **Example usage**:: - - gl = ExperimentalApi() - - # Get image from an image query - iq = gl.get_image_query("iq_123") - image_bytes = gl.get_image(iq.id) - - # Open with PIL - returns RGB order - from PIL import Image - image = Image.open(gl.get_image(iq.id)) # Returns RGB image - - # Open with numpy via PIL - returns RGB order - import numpy as np - from io import BytesIO - image = np.array(Image.open(gl.get_image(iq.id))) # Returns RGB array - - # Open with OpenCV - returns BGR order - import cv2 - import numpy as np - nparr = np.frombuffer(image_bytes, np.uint8) - image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # Returns BGR array - # To convert to RGB if needed: - # image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - - :param iq_id: The ID of the image query to get the image from - :return: The image as a byte array that can be used with PIL or other image libraries - """ - # TODO: support taking an ImageQuery object - return self.images_api.get_image(iq_id) - def get_notes(self, detector: Union[str, Detector]) -> Dict[str, Any]: """ Retrieves all notes associated with a detector. @@ -619,103 +575,6 @@ def create_note( response = requests.post(url, headers=headers, data=data, files=files, params=params) # type: ignore response.raise_for_status() # Raise an exception for error status codes - def create_detector_group(self, name: str) -> DetectorGroup: - """ - Creates a detector group with the given name. A detector group allows you to organize - related detectors together. - - .. note:: - You can specify a detector group when creating a detector without the need to create it ahead of time. - The group will be created automatically if it doesn't exist. - - **Example usage**:: - - gl = ExperimentalApi() - - # Create a group for all door-related detectors - door_group = gl.create_detector_group("door-detectors") - - # Later, create detectors in this group - door_open_detector = gl.create_detector( - name="front-door-open", - query="Is the front door open?", - detector_group=door_group - ) - - :param name: The name of the detector group. This should be descriptive and unique within your organization. - :type name: str - :return: A DetectorGroup object corresponding to the newly created detector group - :rtype: DetectorGroup - """ - return DetectorGroup(**self.detector_group_api.create_detector_group(DetectorGroupRequest(name=name)).to_dict()) - - def list_detector_groups(self) -> List[DetectorGroup]: - """ - Gets a list of all detector groups in your account. - - **Example usage**:: - - gl = ExperimentalApi() - - # Get all detector groups - groups = gl.list_detector_groups() - - # Print information about each group - for group in groups: - print(f"Group name: {group.name}") - print(f"Group ID: {group.id}") - - :return: A list of DetectorGroup objects representing all detector groups in your account - """ - return [DetectorGroup(**det.to_dict()) for det in self.detector_group_api.get_detector_groups()] - - def create_roi(self, label: str, top_left: Tuple[float, float], bottom_right: Tuple[float, float]) -> ROI: - """ - Creates a Region of Interest (ROI) object that can be used to specify areas of interest in images. Certain - detectors (such as Count-mode detectors) may emit ROIs as part of their output. Providing an ROI can help - improve the accuracy of such detectors. - - .. note:: - ROI functionality is only available to Pro tier and higher. 
- If you would like to learn more, reach out to us at https://groundlight.ai - - **Example usage**:: - - gl = ExperimentalApi() - - # Create an ROI for a door in the image - door_roi = gl.create_roi( - label="door", - top_left=(0.2, 0.3), # Coordinates are normalized (0-1) - bottom_right=(0.4, 0.8) # Coordinates are normalized (0-1) - ) - - # Use the ROI when submitting an image query - query = gl.submit_image_query( - detector="door-detector", - image=image_bytes, - rois=[door_roi] - ) - - :param label: A descriptive label for the object or area contained in the ROI - :param top_left: Tuple of (x, y) coordinates for the top-left corner, normalized to [0,1] - :param bottom_right: Tuple of (x, y) coordinates for the bottom-right corner, normalized to [0,1] - :return: An ROI object that can be used in image queries - """ - - return ROI( - label=label, - score=1.0, - geometry=BBoxGeometry( - left=top_left[0], - top=top_left[1], - right=bottom_right[0], - bottom=bottom_right[1], - x=(top_left[0] + bottom_right[0]) / 2, - y=(top_left[1] + bottom_right[1]) / 2, - ), - ) - def reset_detector(self, detector: Union[str, Detector]) -> None: """ Removes all image queries and training data for the given detector. This effectively resets @@ -769,215 +628,6 @@ def update_detector_name(self, detector: Union[str, Detector], name: str) -> Non detector = detector.id self.detectors_api.update_detector(detector, patched_detector_request=PatchedDetectorRequest(name=name)) - def update_detector_status(self, detector: Union[str, Detector], enabled: bool) -> None: - """ - Updates the status of the given detector. When a detector is disabled (enabled=False), - it will not accept or process any new image queries. Existing queries will not be affected. - - **Example usage**:: - - gl = ExperimentalApi() - - # Using a detector object - detector = gl.get_detector("det_abc123") - gl.update_detector_status(detector, enabled=False) # Disable the detector - - # Using a detector ID string directly - gl.update_detector_status("det_abc123", enabled=True) # Enable the detector - - :param detector: Either a Detector object or a detector ID string starting with "det_". - The detector whose status should be updated. - :param enabled: Boolean indicating whether the detector should be enabled (True) or - disabled (False). When disabled, the detector will not process new queries. - - :return: None - """ - if isinstance(detector, Detector): - detector = detector.id - self.detectors_api.update_detector( - detector, - patched_detector_request=PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF")), - ) - - def update_detector_escalation_type(self, detector: Union[str, Detector], escalation_type: str) -> None: - """ - Updates the escalation type of the given detector, controlling whether queries can be - sent to human labelers when ML confidence is low. - - This is particularly useful for controlling costs. When set to "NO_HUMAN_LABELING", - queries will only receive ML predictions, even if confidence is low. - When set to "STANDARD", low-confidence queries may be sent to human labelers for verification. 
- - **Example usage**:: - - gl = ExperimentalApi() - - # Using a detector object - detector = gl.get_detector("det_abc123") - - # Disable human labeling - gl.update_detector_escalation_type(detector, "NO_HUMAN_LABELING") - - # Re-enable standard human labeling - gl.update_detector_escalation_type("det_abc123", "STANDARD") - - :param detector: Either a Detector object or a detector ID string starting with "det_". - The detector whose escalation type should be updated. - :param escalation_type: The new escalation type setting. Must be one of: - - "STANDARD": Allow human labeling for low-confidence queries - - "NO_HUMAN_LABELING": Never send queries to human labelers - - :return: None - :raises ValueError: If escalation_type is not one of the allowed values - """ - if isinstance(detector, Detector): - detector = detector.id - escalation_type = escalation_type.upper() - if escalation_type not in ["STANDARD", "NO_HUMAN_LABELING"]: - raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'") - self.detectors_api.update_detector( - detector, - patched_detector_request=PatchedDetectorRequest(escalation_type=escalation_type), - ) - - def create_counting_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals - self, - name: str, - query: str, - class_name: str, - *, - max_count: Optional[int] = None, - group_name: Optional[str] = None, - confidence_threshold: Optional[float] = None, - patience_time: Optional[float] = None, - pipeline_config: Optional[str] = None, - metadata: Union[dict, str, None] = None, - ) -> Detector: - """ - Creates a counting detector that can count objects in images up to a specified maximum count. - - **Example usage**:: - - gl = ExperimentalApi() - - # Create a detector that counts people up to 5 - detector = gl.create_counting_detector( - name="people_counter", - query="How many people are in the image?", - class_name="person", - max_count=5, - confidence_threshold=0.9, - patience_time=30.0 - ) - - # Use the detector to count people in an image - image_query = gl.ask_ml(detector, "path/to/image.jpg") - print(f"Counted {image_query.result.count} people") - print(f"Confidence: {image_query.result.confidence}") - - :param name: A short, descriptive name for the detector. - :param query: A question about the count of an object in the image. - :param class_name: The class name of the object to count. - :param max_count: Maximum number of objects to count (default: 10) - :param group_name: Optional name of a group to organize related detectors together. - :param confidence_threshold: A value that sets the minimum confidence level required for the ML model's - predictions. If confidence is below this threshold, the query may be sent for human review. - :param patience_time: The maximum time in seconds that Groundlight will attempt to generate a - confident prediction before falling back to human review. Defaults to 30 seconds. - :param pipeline_config: Advanced usage only. Configuration string needed to instantiate a specific - prediction pipeline for this detector. - :param metadata: A dictionary or JSON string containing custom key/value pairs to associate with - the detector (limited to 1KB). This metadata can be used to store additional - information like location, purpose, or related system IDs. You can retrieve this - metadata later by calling `get_detector()`. 
- - :return: The created Detector object - """ - - detector_creation_input = self._prep_create_detector( - name=name, - query=query, - group_name=group_name, - confidence_threshold=confidence_threshold, - patience_time=patience_time, - pipeline_config=pipeline_config, - metadata=metadata, - ) - detector_creation_input.mode = ModeEnum.COUNT - - if max_count is None: - mode_config = CountModeConfiguration(class_name=class_name) - else: - mode_config = CountModeConfiguration(max_count=max_count, class_name=class_name) - - detector_creation_input.mode_configuration = mode_config - obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT) - return Detector.parse_obj(obj.to_dict()) - - def create_multiclass_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals - self, - name: str, - query: str, - class_names: List[str], - *, - group_name: Optional[str] = None, - confidence_threshold: Optional[float] = None, - patience_time: Optional[float] = None, - pipeline_config: Optional[str] = None, - metadata: Union[dict, str, None] = None, - ) -> Detector: - """ - Creates a multiclass detector with the given name and query. - - **Example usage**:: - - gl = ExperimentalApi() - - detector = gl.create_multiclass_detector( - name="Traffic Light Detector", - query="What color is the traffic light?", - class_names=["Red", "Yellow", "Green"] - ) - - # Use the detector to classify a traffic light - image_query = gl.ask_ml(detector, "path/to/image.jpg") - print(f"Traffic light is {image_query.result.label}") - print(f"Confidence: {image_query.result.confidence}") - - :param name: A short, descriptive name for the detector. - :param query: A question about classifying objects in the image. - :param class_names: List of possible class labels for classification. - :param group_name: Optional name of a group to organize related detectors together. - :param confidence_threshold: A value between 1/num_classes and 1 that sets the minimum confidence level required - for the ML model's predictions. If confidence is below this threshold, - the query may be sent for human review. - :param patience_time: The maximum time in seconds that Groundlight will attempt to generate a - confident prediction before falling back to human review. Defaults to 30 seconds. - :param pipeline_config: Advanced usage only. Configuration string needed to instantiate a specific - prediction pipeline for this detector. - :param metadata: A dictionary or JSON string containing custom key/value pairs to associate with - the detector (limited to 1KB). This metadata can be used to store additional - information like location, purpose, or related system IDs. You can retrieve this - metadata later by calling `get_detector()`. 
-
-        :return: The created Detector object
-        """
-
-        detector_creation_input = self._prep_create_detector(
-            name=name,
-            query=query,
-            group_name=group_name,
-            confidence_threshold=confidence_threshold,
-            patience_time=patience_time,
-            pipeline_config=pipeline_config,
-            metadata=metadata,
-        )
-        detector_creation_input.mode = ModeEnum.MULTI_CLASS
-        mode_config = MultiClassModeConfiguration(class_names=class_names)
-        detector_creation_input.mode_configuration = mode_config
-        obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT)
-        return Detector.parse_obj(obj.to_dict())
-
    def create_bounding_box_detector(  # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals
        self,
        name: str,
diff --git a/test/integration/test_groundlight.py b/test/integration/test_groundlight.py
index 08c0c08a..c68e47ea 100644
--- a/test/integration/test_groundlight.py
+++ b/test/integration/test_groundlight.py
@@ -21,6 +21,7 @@
     CountingResult,
     Detector,
     ImageQuery,
+    ModeEnum,
     MultiClassificationResult,
     PaginatedDetectorList,
     PaginatedImageQueryList,
 )
@@ -87,6 +88,16 @@ def test_create_detector(gl: Groundlight):
        _detector.confidence_threshold == DEFAULT_CONFIDENCE_THRESHOLD
    ), "We expected the default confidence threshold to be used."

+    # Test creating detectors with other modes
+    name = f"Test {datetime.utcnow()}"  # Need a unique name
+    count_detector = gl.create_detector(name=name, query=query, mode=ModeEnum.COUNT, class_names="dog")
+    assert str(count_detector)
+    name = f"Test {datetime.utcnow()}"  # Need a unique name
+    multiclass_detector = gl.create_detector(
+        name=name, query=query, mode=ModeEnum.MULTI_CLASS, class_names=["dog", "cat"]
+    )
+    assert str(multiclass_detector)
+

def test_create_detector_with_pipeline_config(gl: Groundlight):
    # "never-review" is a special model that always returns the same result with 100% confidence.
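[Editor's note] The new `test_create_detector` assertions above only exercise the happy path of the mode dispatch. A short sketch of the validation errors that the `create_detector` dispatch earlier in this diff is designed to raise; the detector names and queries are illustrative, and the calls assume a configured API token (validation happens before any network request).

```python notest
from groundlight import Groundlight
from model import ModeEnum  # ModeEnum comes from `model`, as in the test imports above

gl = Groundlight()

# BINARY (the default) rejects class_names entirely
try:
    gl.create_detector(name="bad-binary", query="Is there a dog?", class_names="dog")
except ValueError as e:
    print(e)  # class_names is not supported for binary detectors

# COUNT requires a single string, not a list
try:
    gl.create_detector(
        name="bad-count", query="How many dogs?", mode=ModeEnum.COUNT, class_names=["dog"]
    )
except ValueError as e:
    print(e)  # class_names must be a single string for counting detectors

# MULTI_CLASS requires a list, not a single string
try:
    gl.create_detector(
        name="bad-multiclass", query="What animal?", mode=ModeEnum.MULTI_CLASS, class_names="dog"
    )
except ValueError as e:
    print(e)  # class_names must be a list for multi-class detectors
```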
@@ -781,3 +792,59 @@ def test_submit_image_query_with_empty_inspection_id(gl: Groundlight, detector:
        human_review="NEVER",
        inspection_id="",
    )
+
+
+def test_binary_detector(gl: Groundlight):
+    """
+    verify that we can create and submit to a binary detector
+    """
+    name = f"Test {datetime.utcnow()}"
+    created_detector = gl.create_binary_detector(name, "Is there a dog?", confidence_threshold=0.0)
+    assert created_detector is not None
+    binary_iq = gl.submit_image_query(created_detector, "test/assets/dog.jpeg")
+    assert binary_iq.result.label is not None
+
+
+def test_counting_detector(gl: Groundlight):
+    """
+    verify that we can create and submit to a counting detector
+    """
+    name = f"Test {datetime.utcnow()}"
+    created_detector = gl.create_counting_detector(name, "How many dogs?", "dog", confidence_threshold=0.0)
+    assert created_detector is not None
+    count_iq = gl.submit_image_query(created_detector, "test/assets/dog.jpeg")
+    assert count_iq.result.count is not None
+
+
+def test_counting_detector_async(gl: Groundlight):
+    """
+    verify that we can create and submit asynchronously to a counting detector
+    """
+    name = f"Test {datetime.utcnow()}"
+    created_detector = gl.create_counting_detector(name, "How many dogs?", "dog", confidence_threshold=0.0)
+    assert created_detector is not None
+    async_iq = gl.ask_async(created_detector, "test/assets/dog.jpeg")
+    # attempting to access fields within the result should raise an exception
+    with pytest.raises(AttributeError):
+        _ = async_iq.result.label  # type: ignore
+    with pytest.raises(AttributeError):
+        _ = async_iq.result.confidence  # type: ignore
+    time.sleep(5)
+    # you should be able to get a "real" result by retrieving an updated image query object from the server
+    _image_query = gl.get_image_query(id=async_iq.id)
+    assert _image_query.result is not None
+
+
+def test_multiclass_detector(gl: Groundlight):
+    """
+    verify that we can create and submit to a multi-class detector
+    """
+    name = f"Test {datetime.utcnow()}"
+    class_names = ["Golden Retriever", "Labrador Retriever", "Poodle"]
+    created_detector = gl.create_multiclass_detector(
+        name, "What kind of dog is this?", class_names=class_names, confidence_threshold=0.0
+    )
+    assert created_detector is not None
+    mc_iq = gl.submit_image_query(created_detector, "test/assets/dog.jpeg")
+    assert mc_iq.result.label is not None
+    assert mc_iq.result.label in class_names
diff --git a/test/unit/test_experimental.py b/test/unit/test_experimental.py
index fad10299..157cfe04 100644
--- a/test/unit/test_experimental.py
+++ b/test/unit/test_experimental.py
@@ -90,51 +90,6 @@ def test_submit_multiple_rois(gl_experimental: ExperimentalApi, image_query_one:
    gl_experimental.add_label(image_query_one, 3, [roi] * 3)

-
-def test_counting_detector(gl_experimental: ExperimentalApi):
-    """
-    verify that we can create and submit to a counting detector
-    """
-    name = f"Test {datetime.utcnow()}"
-    created_detector = gl_experimental.create_counting_detector(name, "How many dogs", "dog", confidence_threshold=0.0)
-    assert created_detector is not None
-    count_iq = gl_experimental.submit_image_query(created_detector, "test/assets/dog.jpeg")
-    assert count_iq.result.count is not None
-
-
-def test_counting_detector_async(gl_experimental: ExperimentalApi):
-    """
-    verify that we can create and submit to a counting detector
-    """
-    name = f"Test {datetime.utcnow()}"
-    created_detector = gl_experimental.create_counting_detector(name, "How many dogs", "dog", confidence_threshold=0.0)
-    assert created_detector is not None
-    async_iq = gl_experimental.ask_async(created_detector, "test/assets/dog.jpeg")
-    # attempting to access fields within the result should raise an exception
-    with pytest.raises(AttributeError):
-        _ = async_iq.result.label  # type: ignore
-    with pytest.raises(AttributeError):
-        _ = async_iq.result.confidence  # type: ignore
-    time.sleep(5)
-    # you should be able to get a "real" result by retrieving an updated image query object from the server
-    _image_query = gl_experimental.get_image_query(id=async_iq.id)
-    assert _image_query.result is not None
-
-
-def test_multiclass_detector(gl_experimental: ExperimentalApi):
-    """
-    verify that we can create and submit to a multi-class detector
-    """
-    name = f"Test {datetime.utcnow()}"
-    class_names = ["Golden Retriever", "Labrador Retriever", "Poodle"]
-    created_detector = gl_experimental.create_multiclass_detector(
-        name, "What kind of dog is this?", class_names=class_names, confidence_threshold=0.0
-    )
-    assert created_detector is not None
-    mc_iq = gl_experimental.submit_image_query(created_detector, "test/assets/dog.jpeg")
-    assert mc_iq.result.label is not None
-    assert mc_iq.result.label in class_names
-
-
 def test_text_recognition_detector(gl_experimental: ExperimentalApi):
     """
     verify that we can create and submit to a text recognition detector
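[Editor's note] A closing observation on the async tests migrated above: `ask_async` returns an `ImageQuery` whose result is not yet populated, and the tests simply re-fetch after a fixed `time.sleep(5)`. A more robust client-side pattern is a bounded polling loop built on the same `get_image_query` call; this is a sketch only, with an interval and timeout that are illustrative rather than SDK recommendations, and it assumes a detector named "car-counter" already exists.

```python notest
import time

from groundlight import Groundlight

gl = Groundlight()
detector = gl.get_detector_by_name("car-counter")  # assumes this detector exists

# Fire-and-forget submission: the returned image query has no usable result yet
async_iq = gl.ask_async(detector, "path/to/image.jpg")  # illustrative path

# Poll the server until a result is attached (interval/timeout are illustrative)
deadline = time.monotonic() + 60
image_query = None
while time.monotonic() < deadline:
    image_query = gl.get_image_query(id=async_iq.id)
    if image_query.result is not None:
        break
    time.sleep(2)

if image_query is not None and image_query.result is not None:
    print(f"Counted {image_query.result.count} objects")
else:
    print("No result within the timeout")
```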