From 8d8fc4ff6a8a8765d0066ef4043ddfec42b667ef Mon Sep 17 00:00:00 2001 From: paulina-positronix Date: Wed, 2 Apr 2025 04:08:57 +0000 Subject: [PATCH 01/13] Make most evaluation results subfields nullable to avoid ApiTypeErrors when requesting evaluation results for count mode detectors --- generated/README.md | 2 +- .../InlineResponse2001EvaluationResults.md | 12 +++-- .../groundlight_openapi_client/__init__.py | 2 +- .../api/actions_api.py | 2 +- .../api/detector_groups_api.py | 2 +- .../api/detector_reset_api.py | 2 +- .../api/detectors_api.py | 2 +- .../api/edge_api.py | 2 +- .../api/image_queries_api.py | 2 +- .../api/labels_api.py | 2 +- .../api/notes_api.py | 2 +- .../api/user_api.py | 2 +- .../groundlight_openapi_client/api_client.py | 2 +- .../configuration.py | 4 +- .../groundlight_openapi_client/exceptions.py | 2 +- .../model/action.py | 2 +- .../model/action_list.py | 2 +- .../model/all_notes.py | 2 +- .../model/annotations_requested_enum.py | 2 +- .../model/b_box_geometry.py | 2 +- .../model/b_box_geometry_request.py | 2 +- .../model/binary_classification_result.py | 2 +- .../model/blank_enum.py | 2 +- .../model/bounding_box_mode_configuration.py | 2 +- .../model/bounding_box_result.py | 2 +- .../model/channel_enum.py | 2 +- .../model/condition.py | 2 +- .../model/condition_request.py | 2 +- .../model/count_mode_configuration.py | 2 +- .../model/counting_result.py | 2 +- .../model/detector.py | 2 +- .../model/detector_creation_input_request.py | 2 +- .../model/detector_group.py | 2 +- .../model/detector_group_request.py | 2 +- .../model/detector_type_enum.py | 2 +- .../model/edge_model_info.py | 2 +- .../model/escalation_type_enum.py | 2 +- .../model/image_query.py | 2 +- .../model/image_query_type_enum.py | 2 +- .../model/inline_response200.py | 2 +- .../model/inline_response2001.py | 2 +- .../inline_response2001_evaluation_results.py | 49 ++++++++++++++----- .../model/inline_response2002.py | 2 +- .../model/inline_response200_summary.py | 2 +- ...inline_response200_summary_class_counts.py | 2 +- .../model/label_value.py | 2 +- .../model/label_value_request.py | 2 +- .../model/mode_enum.py | 2 +- .../model/multi_class_mode_configuration.py | 2 +- .../model/multi_classification_result.py | 2 +- .../groundlight_openapi_client/model/note.py | 2 +- .../model/note_request.py | 2 +- .../model/paginated_detector_list.py | 2 +- .../model/paginated_image_query_list.py | 2 +- .../model/paginated_rule_list.py | 2 +- .../model/patched_detector_request.py | 2 +- .../model/payload_template.py | 2 +- .../model/payload_template_request.py | 2 +- .../model/result_type_enum.py | 2 +- .../groundlight_openapi_client/model/roi.py | 2 +- .../model/roi_request.py | 2 +- .../groundlight_openapi_client/model/rule.py | 2 +- .../model/rule_request.py | 2 +- .../model/snooze_time_unit_enum.py | 2 +- .../model/source_enum.py | 2 +- .../model/status_enum.py | 2 +- .../model/text_mode_configuration.py | 2 +- .../model/text_recognition_result.py | 2 +- .../model/verb_enum.py | 2 +- .../model/webhook_action.py | 2 +- .../model/webhook_action_request.py | 2 +- .../groundlight_openapi_client/model_utils.py | 2 +- generated/groundlight_openapi_client/rest.py | 2 +- generated/model.py | 2 +- generated/setup.py | 2 +- spec/public-api.yaml | 15 +++++- 76 files changed, 131 insertions(+), 93 deletions(-) diff --git a/generated/README.md b/generated/README.md index 4a84c1f2..b7fddd47 100644 --- a/generated/README.md +++ b/generated/README.md @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. 
You can easily create computer This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: -- API version: 0.18.2 +- API version: 0.18.3 - Package version: 1.0.0 - Build package: org.openapitools.codegen.languages.PythonClientCodegen diff --git a/generated/docs/InlineResponse2001EvaluationResults.md b/generated/docs/InlineResponse2001EvaluationResults.md index 01f845e5..1c0be988 100644 --- a/generated/docs/InlineResponse2001EvaluationResults.md +++ b/generated/docs/InlineResponse2001EvaluationResults.md @@ -7,11 +7,13 @@ Name | Type | Description | Notes **eval_timestamp** | **datetime** | | [optional] **total_ground_truth_examples** | **int, none_type** | | [optional] **kfold_pooled__balanced_accuracy** | **float** | | [optional] -**kfold_pooled__positive_accuracy** | **float** | | [optional] -**kfold_pooled__negative_accuracy** | **float** | | [optional] -**balanced_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | [optional] -**positive_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | [optional] -**negative_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | [optional] +**kfold_pooled__positive_accuracy** | **float, none_type** | | [optional] +**kfold_pooled__negative_accuracy** | **float, none_type** | | [optional] +**precision** | **float, none_type** | | [optional] +**recall** | **float, none_type** | | [optional] +**balanced_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] +**positive_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] +**negative_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/groundlight_openapi_client/__init__.py b/generated/groundlight_openapi_client/__init__.py index ff1e9a1d..45ed51c2 100644 --- a/generated/groundlight_openapi_client/__init__.py +++ b/generated/groundlight_openapi_client/__init__.py @@ -5,7 +5,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/actions_api.py b/generated/groundlight_openapi_client/api/actions_api.py index 4f9d42f5..0dbc94cb 100644 --- a/generated/groundlight_openapi_client/api/actions_api.py +++ b/generated/groundlight_openapi_client/api/actions_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_groups_api.py b/generated/groundlight_openapi_client/api/detector_groups_api.py index 83a2cb4f..91ada22a 100644 --- a/generated/groundlight_openapi_client/api/detector_groups_api.py +++ b/generated/groundlight_openapi_client/api/detector_groups_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_reset_api.py b/generated/groundlight_openapi_client/api/detector_reset_api.py index c50532f8..cc51c357 100644 --- a/generated/groundlight_openapi_client/api/detector_reset_api.py +++ b/generated/groundlight_openapi_client/api/detector_reset_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index eca18f45..33a41d48 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/edge_api.py b/generated/groundlight_openapi_client/api/edge_api.py index a0b3187d..a0f2b212 100644 --- a/generated/groundlight_openapi_client/api/edge_api.py +++ b/generated/groundlight_openapi_client/api/edge_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/image_queries_api.py b/generated/groundlight_openapi_client/api/image_queries_api.py index 03ad2600..8cb2d80e 100644 --- a/generated/groundlight_openapi_client/api/image_queries_api.py +++ b/generated/groundlight_openapi_client/api/image_queries_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/labels_api.py b/generated/groundlight_openapi_client/api/labels_api.py index fd948a9f..27d4c37f 100644 --- a/generated/groundlight_openapi_client/api/labels_api.py +++ b/generated/groundlight_openapi_client/api/labels_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index f1af2cb5..287cad2b 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/user_api.py b/generated/groundlight_openapi_client/api/user_api.py index a14200f2..6e6481ea 100644 --- a/generated/groundlight_openapi_client/api/user_api.py +++ b/generated/groundlight_openapi_client/api/user_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api_client.py b/generated/groundlight_openapi_client/api_client.py index da2e348a..2e8e4d56 100644 --- a/generated/groundlight_openapi_client/api_client.py +++ b/generated/groundlight_openapi_client/api_client.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/configuration.py b/generated/groundlight_openapi_client/configuration.py index 654b832f..c8c0a864 100644 --- a/generated/groundlight_openapi_client/configuration.py +++ b/generated/groundlight_openapi_client/configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -419,7 +419,7 @@ def to_debug_report(self): "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" - "Version of the API: 0.18.2\n" + "Version of the API: 0.18.3\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) ) diff --git a/generated/groundlight_openapi_client/exceptions.py b/generated/groundlight_openapi_client/exceptions.py index 393dbba8..c29a0632 100644 --- a/generated/groundlight_openapi_client/exceptions.py +++ b/generated/groundlight_openapi_client/exceptions.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action.py b/generated/groundlight_openapi_client/model/action.py index 8308d44b..139a4111 100644 --- a/generated/groundlight_openapi_client/model/action.py +++ b/generated/groundlight_openapi_client/model/action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action_list.py b/generated/groundlight_openapi_client/model/action_list.py index a38fae04..e2233883 100644 --- a/generated/groundlight_openapi_client/model/action_list.py +++ b/generated/groundlight_openapi_client/model/action_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/all_notes.py b/generated/groundlight_openapi_client/model/all_notes.py index 89da5af1..122e0fe8 100644 --- a/generated/groundlight_openapi_client/model/all_notes.py +++ b/generated/groundlight_openapi_client/model/all_notes.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/annotations_requested_enum.py b/generated/groundlight_openapi_client/model/annotations_requested_enum.py index d951c99e..3773d934 100644 --- a/generated/groundlight_openapi_client/model/annotations_requested_enum.py +++ b/generated/groundlight_openapi_client/model/annotations_requested_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry.py b/generated/groundlight_openapi_client/model/b_box_geometry.py index 2282dfd7..2e806115 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry_request.py b/generated/groundlight_openapi_client/model/b_box_geometry_request.py index fcac579f..e1e8e73b 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry_request.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/binary_classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py index 25ef477e..d34681f1 100644 --- a/generated/groundlight_openapi_client/model/binary_classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/blank_enum.py b/generated/groundlight_openapi_client/model/blank_enum.py index aa466bb8..31919708 100644 --- a/generated/groundlight_openapi_client/model/blank_enum.py +++ b/generated/groundlight_openapi_client/model/blank_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py b/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py index bcf98f27..f78803b0 100644 --- a/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/bounding_box_result.py b/generated/groundlight_openapi_client/model/bounding_box_result.py index 3c18763d..2a80e3d2 100644 --- a/generated/groundlight_openapi_client/model/bounding_box_result.py +++ b/generated/groundlight_openapi_client/model/bounding_box_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/channel_enum.py b/generated/groundlight_openapi_client/model/channel_enum.py index 720dac0d..2ad2285d 100644 --- a/generated/groundlight_openapi_client/model/channel_enum.py +++ b/generated/groundlight_openapi_client/model/channel_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition.py b/generated/groundlight_openapi_client/model/condition.py index 6e12ea64..abdc7c4a 100644 --- a/generated/groundlight_openapi_client/model/condition.py +++ b/generated/groundlight_openapi_client/model/condition.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition_request.py b/generated/groundlight_openapi_client/model/condition_request.py index 3172892a..dadfd994 100644 --- a/generated/groundlight_openapi_client/model/condition_request.py +++ b/generated/groundlight_openapi_client/model/condition_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/count_mode_configuration.py b/generated/groundlight_openapi_client/model/count_mode_configuration.py index 9c2d3be9..0068e9e9 100644 --- a/generated/groundlight_openapi_client/model/count_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/count_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/counting_result.py b/generated/groundlight_openapi_client/model/counting_result.py index 9d7ff477..23266160 100644 --- a/generated/groundlight_openapi_client/model/counting_result.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index 04c6beb8..99d4c04d 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_creation_input_request.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py index ca1b1563..dc8f5011 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input_request.py +++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group.py b/generated/groundlight_openapi_client/model/detector_group.py index 5620b0c9..8ada556e 100644 --- a/generated/groundlight_openapi_client/model/detector_group.py +++ b/generated/groundlight_openapi_client/model/detector_group.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group_request.py b/generated/groundlight_openapi_client/model/detector_group_request.py index 3302860f..95131dd1 100644 --- a/generated/groundlight_openapi_client/model/detector_group_request.py +++ b/generated/groundlight_openapi_client/model/detector_group_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_type_enum.py b/generated/groundlight_openapi_client/model/detector_type_enum.py index 94d446da..8b9a770d 100644 --- a/generated/groundlight_openapi_client/model/detector_type_enum.py +++ b/generated/groundlight_openapi_client/model/detector_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/edge_model_info.py b/generated/groundlight_openapi_client/model/edge_model_info.py index 66826368..635ee58c 100644 --- a/generated/groundlight_openapi_client/model/edge_model_info.py +++ b/generated/groundlight_openapi_client/model/edge_model_info.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/escalation_type_enum.py b/generated/groundlight_openapi_client/model/escalation_type_enum.py index d28cb1ed..0a9335dc 100644 --- a/generated/groundlight_openapi_client/model/escalation_type_enum.py +++ b/generated/groundlight_openapi_client/model/escalation_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index 79cf0179..80b967b3 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query_type_enum.py b/generated/groundlight_openapi_client/model/image_query_type_enum.py index 42470730..f1c2367a 100644 --- a/generated/groundlight_openapi_client/model/image_query_type_enum.py +++ b/generated/groundlight_openapi_client/model/image_query_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200.py b/generated/groundlight_openapi_client/model/inline_response200.py index 62e2494f..2a04fb2b 100644 --- a/generated/groundlight_openapi_client/model/inline_response200.py +++ b/generated/groundlight_openapi_client/model/inline_response200.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response2001.py b/generated/groundlight_openapi_client/model/inline_response2001.py index cdb30032..45c049c5 100644 --- a/generated/groundlight_openapi_client/model/inline_response2001.py +++ b/generated/groundlight_openapi_client/model/inline_response2001.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py index 987a8fcc..d6e619ab 100644 --- a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py +++ b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -94,16 +94,33 @@ def openapi_types(): none_type, ), # noqa: E501 "kfold_pooled__balanced_accuracy": (float,), # noqa: E501 - "kfold_pooled__positive_accuracy": (float,), # noqa: E501 - "kfold_pooled__negative_accuracy": (float,), # noqa: E501 + "kfold_pooled__positive_accuracy": ( + float, + none_type, + ), # noqa: E501 + "kfold_pooled__negative_accuracy": ( + float, + none_type, + ), # noqa: E501 + "precision": ( + float, + none_type, + ), # noqa: E501 + "recall": ( + float, + none_type, + ), # noqa: E501 "balanced_system_accuracies": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, + none_type, ), # noqa: E501 "positive_system_accuracies": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, + none_type, ), # noqa: E501 "negative_system_accuracies": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, + none_type, ), # noqa: E501 } @@ -117,6 +134,8 @@ def discriminator(): "kfold_pooled__balanced_accuracy": "kfold_pooled__balanced_accuracy", # noqa: E501 "kfold_pooled__positive_accuracy": "kfold_pooled__positive_accuracy", # noqa: E501 "kfold_pooled__negative_accuracy": "kfold_pooled__negative_accuracy", # noqa: E501 + "precision": "precision", # noqa: E501 + "recall": "recall", # noqa: E501 "balanced_system_accuracies": "balanced_system_accuracies", # noqa: E501 "positive_system_accuracies": "positive_system_accuracies", # noqa: E501 "negative_system_accuracies": "negative_system_accuracies", # noqa: E501 @@ -165,11 +184,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 eval_timestamp (datetime): [optional] # noqa: E501 total_ground_truth_examples (int, none_type): [optional] # noqa: E501 kfold_pooled__balanced_accuracy (float): [optional] # noqa: E501 - kfold_pooled__positive_accuracy (float): [optional] # noqa: E501 - kfold_pooled__negative_accuracy (float): [optional] # noqa: E501 - balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 - positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 - negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 + kfold_pooled__positive_accuracy (float, none_type): [optional] # noqa: E501 + kfold_pooled__negative_accuracy (float, none_type): [optional] # noqa: E501 + precision (float, none_type): [optional] # noqa: E501 + recall (float, none_type): [optional] # noqa: E501 + balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 + positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 + negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -257,11 +278,13 @@ def __init__(self, *args, **kwargs): # noqa: E501 eval_timestamp (datetime): [optional] # noqa: E501 total_ground_truth_examples (int, none_type): [optional] # noqa: E501 kfold_pooled__balanced_accuracy (float): [optional] # noqa: E501 - kfold_pooled__positive_accuracy (float): [optional] # noqa: E501 - kfold_pooled__negative_accuracy 
(float): [optional] # noqa: E501 - balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 - positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 - negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): [optional] # noqa: E501 + kfold_pooled__positive_accuracy (float, none_type): [optional] # noqa: E501 + kfold_pooled__negative_accuracy (float, none_type): [optional] # noqa: E501 + precision (float, none_type): [optional] # noqa: E501 + recall (float, none_type): [optional] # noqa: E501 + balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 + positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 + negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/inline_response2002.py b/generated/groundlight_openapi_client/model/inline_response2002.py index 1fbfd51d..cc2ddd6c 100644 --- a/generated/groundlight_openapi_client/model/inline_response2002.py +++ b/generated/groundlight_openapi_client/model/inline_response2002.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200_summary.py b/generated/groundlight_openapi_client/model/inline_response200_summary.py index d5c30a12..278368a6 100644 --- a/generated/groundlight_openapi_client/model/inline_response200_summary.py +++ b/generated/groundlight_openapi_client/model/inline_response200_summary.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py b/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py index b2eb180b..e30fffbf 100644 --- a/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py +++ b/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/label_value.py b/generated/groundlight_openapi_client/model/label_value.py index d2981b5d..8361b9a2 100644 --- a/generated/groundlight_openapi_client/model/label_value.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py index 434f8ad8..a940f5a1 100644 --- a/generated/groundlight_openapi_client/model/label_value_request.py +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py index b35a3833..6c477520 100644 --- a/generated/groundlight_openapi_client/model/mode_enum.py +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py index f08ee27c..8a47db85 100644 --- a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_classification_result.py b/generated/groundlight_openapi_client/model/multi_classification_result.py index ddd68bd7..e2b1d822 100644 --- a/generated/groundlight_openapi_client/model/multi_classification_result.py +++ b/generated/groundlight_openapi_client/model/multi_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/note.py b/generated/groundlight_openapi_client/model/note.py index 64f139a7..6fdffe42 100644 --- a/generated/groundlight_openapi_client/model/note.py +++ b/generated/groundlight_openapi_client/model/note.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/note_request.py b/generated/groundlight_openapi_client/model/note_request.py index 95889010..0df28f64 100644 --- a/generated/groundlight_openapi_client/model/note_request.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_detector_list.py b/generated/groundlight_openapi_client/model/paginated_detector_list.py index 5972205e..8a657572 100644 --- a/generated/groundlight_openapi_client/model/paginated_detector_list.py +++ b/generated/groundlight_openapi_client/model/paginated_detector_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_image_query_list.py b/generated/groundlight_openapi_client/model/paginated_image_query_list.py index 13dccab7..c8a91fab 100644 --- a/generated/groundlight_openapi_client/model/paginated_image_query_list.py +++ b/generated/groundlight_openapi_client/model/paginated_image_query_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_rule_list.py b/generated/groundlight_openapi_client/model/paginated_rule_list.py index 6d007e1d..348b454d 100644 --- a/generated/groundlight_openapi_client/model/paginated_rule_list.py +++ b/generated/groundlight_openapi_client/model/paginated_rule_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 251cb75d..b6a81c4d 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/payload_template.py b/generated/groundlight_openapi_client/model/payload_template.py index a721bb4c..c4bae06c 100644 --- a/generated/groundlight_openapi_client/model/payload_template.py +++ b/generated/groundlight_openapi_client/model/payload_template.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/payload_template_request.py b/generated/groundlight_openapi_client/model/payload_template_request.py index 3a0f12a2..adc9050f 100644 --- a/generated/groundlight_openapi_client/model/payload_template_request.py +++ b/generated/groundlight_openapi_client/model/payload_template_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/result_type_enum.py b/generated/groundlight_openapi_client/model/result_type_enum.py index c4b954fd..5c1f2c7c 100644 --- a/generated/groundlight_openapi_client/model/result_type_enum.py +++ b/generated/groundlight_openapi_client/model/result_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi.py b/generated/groundlight_openapi_client/model/roi.py index 74c4fc66..443d0c07 100644 --- a/generated/groundlight_openapi_client/model/roi.py +++ b/generated/groundlight_openapi_client/model/roi.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi_request.py b/generated/groundlight_openapi_client/model/roi_request.py index 07fb54e2..366c673b 100644 --- a/generated/groundlight_openapi_client/model/roi_request.py +++ b/generated/groundlight_openapi_client/model/roi_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule.py b/generated/groundlight_openapi_client/model/rule.py index 7f1be14a..874ab7f3 100644 --- a/generated/groundlight_openapi_client/model/rule.py +++ b/generated/groundlight_openapi_client/model/rule.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule_request.py b/generated/groundlight_openapi_client/model/rule_request.py index 8fdf8fe1..9fb7ba11 100644 --- a/generated/groundlight_openapi_client/model/rule_request.py +++ b/generated/groundlight_openapi_client/model/rule_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py index f5586bb6..63eb1c41 100644 --- a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py +++ b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py index 4addbc10..9ad77b6c 100644 --- a/generated/groundlight_openapi_client/model/source_enum.py +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/status_enum.py b/generated/groundlight_openapi_client/model/status_enum.py index b41c2871..7d697180 100644 --- a/generated/groundlight_openapi_client/model/status_enum.py +++ b/generated/groundlight_openapi_client/model/status_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/text_mode_configuration.py b/generated/groundlight_openapi_client/model/text_mode_configuration.py index ee4c4e73..67897d83 100644 --- a/generated/groundlight_openapi_client/model/text_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/text_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/text_recognition_result.py b/generated/groundlight_openapi_client/model/text_recognition_result.py index 25add8f1..de07c128 100644 --- a/generated/groundlight_openapi_client/model/text_recognition_result.py +++ b/generated/groundlight_openapi_client/model/text_recognition_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/verb_enum.py b/generated/groundlight_openapi_client/model/verb_enum.py index 8d138449..e3f77857 100644 --- a/generated/groundlight_openapi_client/model/verb_enum.py +++ b/generated/groundlight_openapi_client/model/verb_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/webhook_action.py b/generated/groundlight_openapi_client/model/webhook_action.py index 33f70f3c..8bd4fc73 100644 --- a/generated/groundlight_openapi_client/model/webhook_action.py +++ b/generated/groundlight_openapi_client/model/webhook_action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/webhook_action_request.py b/generated/groundlight_openapi_client/model/webhook_action_request.py index cd98a99b..1cf055f8 100644 --- a/generated/groundlight_openapi_client/model/webhook_action_request.py +++ b/generated/groundlight_openapi_client/model/webhook_action_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model_utils.py b/generated/groundlight_openapi_client/model_utils.py index cf7bd6d4..660d902a 100644 --- a/generated/groundlight_openapi_client/model_utils.py +++ b/generated/groundlight_openapi_client/model_utils.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/rest.py b/generated/groundlight_openapi_client/rest.py index 16d8ca86..14aa376b 100644 --- a/generated/groundlight_openapi_client/rest.py +++ b/generated/groundlight_openapi_client/rest.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/model.py b/generated/model.py index 480c1e0d..c1610ee0 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-03-31T23:28:33+00:00 +# timestamp: 2025-04-02T04:06:49+00:00 from __future__ import annotations diff --git a/generated/setup.py b/generated/setup.py index 9c4bb572..96e1f78a 100644 --- a/generated/setup.py +++ b/generated/setup.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.2 + The version of the OpenAPI document: 0.18.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 1859dbf0..5982ba40 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: Groundlight API - version: 0.18.2 + version: 0.18.3 description: Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
@@ -407,18 +407,31 @@ paths: kfold_pooled__positive_accuracy: type: number format: float + nullable: true kfold_pooled__negative_accuracy: type: number format: float + nullable: true + precision: + type: number + format: float + nullable: true + recall: + type: number + format: float + nullable: true balanced_system_accuracies: type: object additionalProperties: true + nullable: true positive_system_accuracies: type: object additionalProperties: true + nullable: true negative_system_accuracies: type: object additionalProperties: true + nullable: true nullable: true description: '' /v1/edge/fetch-model-urls/{detector_id}/: From 4fd6e887884387ef8197e1fecc212c36f762573e Mon Sep 17 00:00:00 2001 From: paulina-positronix Date: Wed, 2 Apr 2025 04:13:42 +0000 Subject: [PATCH 02/13] Remove premature precision and recall from evaluation results --- .../docs/InlineResponse2001EvaluationResults.md | 2 -- .../inline_response2001_evaluation_results.py | 14 -------------- generated/model.py | 2 +- spec/public-api.yaml | 8 -------- 4 files changed, 1 insertion(+), 25 deletions(-) diff --git a/generated/docs/InlineResponse2001EvaluationResults.md b/generated/docs/InlineResponse2001EvaluationResults.md index 1c0be988..ed507395 100644 --- a/generated/docs/InlineResponse2001EvaluationResults.md +++ b/generated/docs/InlineResponse2001EvaluationResults.md @@ -9,8 +9,6 @@ Name | Type | Description | Notes **kfold_pooled__balanced_accuracy** | **float** | | [optional] **kfold_pooled__positive_accuracy** | **float, none_type** | | [optional] **kfold_pooled__negative_accuracy** | **float, none_type** | | [optional] -**precision** | **float, none_type** | | [optional] -**recall** | **float, none_type** | | [optional] **balanced_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] **positive_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] **negative_system_accuracies** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [optional] diff --git a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py index d6e619ab..bd9ce9cc 100644 --- a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py +++ b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py @@ -102,14 +102,6 @@ def openapi_types(): float, none_type, ), # noqa: E501 - "precision": ( - float, - none_type, - ), # noqa: E501 - "recall": ( - float, - none_type, - ), # noqa: E501 "balanced_system_accuracies": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, @@ -134,8 +126,6 @@ def discriminator(): "kfold_pooled__balanced_accuracy": "kfold_pooled__balanced_accuracy", # noqa: E501 "kfold_pooled__positive_accuracy": "kfold_pooled__positive_accuracy", # noqa: E501 "kfold_pooled__negative_accuracy": "kfold_pooled__negative_accuracy", # noqa: E501 - "precision": "precision", # noqa: E501 - "recall": "recall", # noqa: E501 "balanced_system_accuracies": "balanced_system_accuracies", # noqa: E501 "positive_system_accuracies": "positive_system_accuracies", # noqa: E501 "negative_system_accuracies": "negative_system_accuracies", # noqa: E501 @@ -186,8 +176,6 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 kfold_pooled__balanced_accuracy (float): [optional] # noqa: E501 
kfold_pooled__positive_accuracy (float, none_type): [optional] # noqa: E501 kfold_pooled__negative_accuracy (float, none_type): [optional] # noqa: E501 - precision (float, none_type): [optional] # noqa: E501 - recall (float, none_type): [optional] # noqa: E501 balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 @@ -280,8 +268,6 @@ def __init__(self, *args, **kwargs): # noqa: E501 kfold_pooled__balanced_accuracy (float): [optional] # noqa: E501 kfold_pooled__positive_accuracy (float, none_type): [optional] # noqa: E501 kfold_pooled__negative_accuracy (float, none_type): [optional] # noqa: E501 - precision (float, none_type): [optional] # noqa: E501 - recall (float, none_type): [optional] # noqa: E501 balanced_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 positive_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 negative_system_accuracies ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): [optional] # noqa: E501 diff --git a/generated/model.py b/generated/model.py index c1610ee0..642ea9e8 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-04-02T04:06:49+00:00 +# timestamp: 2025-04-02T04:12:45+00:00 from __future__ import annotations diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 5982ba40..e14768b7 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -412,14 +412,6 @@ paths: type: number format: float nullable: true - precision: - type: number - format: float - nullable: true - recall: - type: number - format: float - nullable: true balanced_system_accuracies: type: object additionalProperties: true From 66e3547096a3d6006b430ae313b28f7537b358b5 Mon Sep 17 00:00:00 2001 From: paulina-positronix Date: Wed, 2 Apr 2025 20:54:24 +0000 Subject: [PATCH 03/13] Update public-api.yaml over from internal codebase and make generate --- generated/README.md | 4 +- generated/docs/ActionsApi.md | 2 +- generated/docs/AnnotationsRequestedEnum.md | 3 +- generated/docs/BinaryClassificationResult.md | 2 +- generated/docs/BoundingBoxResult.md | 2 +- generated/docs/Condition.md | 2 +- generated/docs/ConditionRequest.md | 2 +- generated/docs/CountingResult.md | 2 +- generated/docs/Detector.md | 4 +- generated/docs/DetectorsApi.md | 2 +- generated/docs/EscalationTypeEnum.md | 3 +- generated/docs/LabelValue.md | 4 +- generated/docs/MultiClassificationResult.md | 2 +- generated/docs/PatchedDetectorRequest.md | 2 +- generated/docs/SourceEnum.md | 3 +- generated/docs/TextRecognitionResult.md | 2 +- generated/docs/VerbEnum.md | 3 +- .../groundlight_openapi_client/__init__.py | 2 +- .../api/actions_api.py | 2 +- .../api/detector_groups_api.py | 2 +- .../api/detector_reset_api.py | 2 +- .../api/detectors_api.py | 2 +- .../api/edge_api.py | 2 +- .../api/image_queries_api.py | 2 +- .../api/labels_api.py | 2 +- .../api/notes_api.py | 2 +- .../api/user_api.py | 2 +- .../groundlight_openapi_client/api_client.py | 2 +- .../configuration.py | 4 +- 
.../groundlight_openapi_client/exceptions.py | 2 +- .../model/action.py | 2 +- .../model/action_list.py | 2 +- .../model/all_notes.py | 2 +- .../model/annotations_requested_enum.py | 10 +- .../model/b_box_geometry.py | 2 +- .../model/b_box_geometry_request.py | 2 +- .../model/binary_classification_result.py | 25 +-- .../model/blank_enum.py | 2 +- .../model/bounding_box_mode_configuration.py | 2 +- .../model/bounding_box_result.py | 25 +-- .../model/channel_enum.py | 2 +- .../model/condition.py | 16 +- .../model/condition_request.py | 16 +- .../model/count_mode_configuration.py | 2 +- .../model/counting_result.py | 20 +- .../model/detector.py | 36 +--- .../model/detector_creation_input_request.py | 2 +- .../model/detector_group.py | 2 +- .../model/detector_group_request.py | 2 +- .../model/detector_type_enum.py | 2 +- .../model/edge_model_info.py | 2 +- .../model/escalation_type_enum.py | 10 +- .../model/image_query.py | 2 +- .../model/image_query_type_enum.py | 2 +- .../model/inline_response200.py | 2 +- .../model/inline_response2001.py | 2 +- .../inline_response2001_evaluation_results.py | 2 +- .../model/inline_response2002.py | 2 +- .../model/inline_response200_summary.py | 2 +- ...inline_response200_summary_class_counts.py | 2 +- .../model/label_value.py | 24 +-- .../model/label_value_request.py | 2 +- .../model/mode_enum.py | 2 +- .../model/multi_class_mode_configuration.py | 2 +- .../model/multi_classification_result.py | 20 +- .../groundlight_openapi_client/model/note.py | 2 +- .../model/note_request.py | 2 +- .../model/paginated_detector_list.py | 2 +- .../model/paginated_image_query_list.py | 2 +- .../model/paginated_rule_list.py | 2 +- .../model/patched_detector_request.py | 23 +- .../model/payload_template.py | 2 +- .../model/payload_template_request.py | 2 +- .../model/result_type_enum.py | 2 +- .../groundlight_openapi_client/model/roi.py | 2 +- .../model/roi_request.py | 2 +- .../groundlight_openapi_client/model/rule.py | 2 +- .../model/rule_request.py | 2 +- .../model/snooze_time_unit_enum.py | 2 +- .../model/source_enum.py | 10 +- .../model/status_enum.py | 2 +- .../model/text_mode_configuration.py | 2 +- .../model/text_recognition_result.py | 20 +- .../model/verb_enum.py | 10 +- .../model/webhook_action.py | 2 +- .../model/webhook_action_request.py | 2 +- .../groundlight_openapi_client/model_utils.py | 2 +- generated/groundlight_openapi_client/rest.py | 2 +- generated/model.py | 196 +++++------------- generated/setup.py | 2 +- spec/public-api.yaml | 173 ++++------------ 91 files changed, 242 insertions(+), 554 deletions(-) diff --git a/generated/README.md b/generated/README.md index b7fddd47..086d2c24 100644 --- a/generated/README.md +++ b/generated/README.md @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. 
You can easily create computer This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: -- API version: 0.18.3 +- API version: 0.18.2 - Package version: 1.0.0 - Build package: org.openapitools.codegen.languages.PythonClientCodegen @@ -84,7 +84,7 @@ rule_request = RuleRequest( snooze_time_unit=None, human_review_required=False, condition=ConditionRequest( - verb=VerbEnum("ANSWERED_CONSECUTIVELY"), + verb="verb_example", parameters={ "key": None, }, diff --git a/generated/docs/ActionsApi.md b/generated/docs/ActionsApi.md index ae3b580b..099d468f 100644 --- a/generated/docs/ActionsApi.md +++ b/generated/docs/ActionsApi.md @@ -59,7 +59,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: snooze_time_unit=None, human_review_required=False, condition=ConditionRequest( - verb=VerbEnum("ANSWERED_CONSECUTIVELY"), + verb="verb_example", parameters={ "key": None, }, diff --git a/generated/docs/AnnotationsRequestedEnum.md b/generated/docs/AnnotationsRequestedEnum.md index fd366f01..31b444e4 100644 --- a/generated/docs/AnnotationsRequestedEnum.md +++ b/generated/docs/AnnotationsRequestedEnum.md @@ -1,11 +1,10 @@ # AnnotationsRequestedEnum -* `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes | must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] +**value** | **str** | | must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/BinaryClassificationResult.md b/generated/docs/BinaryClassificationResult.md index a98a3b70..0ddc4f9e 100644 --- a/generated/docs/BinaryClassificationResult.md +++ b/generated/docs/BinaryClassificationResult.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **label** | **str** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] -**result_type** | **str** | | [optional] if omitted the server will use the default value of "binary_classification" +**result_type** | **str** | | [optional] **from_edge** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/BoundingBoxResult.md b/generated/docs/BoundingBoxResult.md index 7a089d95..aab5d925 100644 --- a/generated/docs/BoundingBoxResult.md +++ b/generated/docs/BoundingBoxResult.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **label** | **str** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] -**result_type** | **str** | | [optional] if omitted the server will use the default value of "bounding_box" +**result_type** | **str** | | [optional] **from_edge** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/Condition.md b/generated/docs/Condition.md index edfe01a8..e54e1ad5 100644 --- a/generated/docs/Condition.md +++ b/generated/docs/Condition.md @@ -4,7 +4,7 @@ ## Properties Name | Type | 
Description | Notes ------------ | ------------- | ------------- | ------------- -**verb** | [**VerbEnum**](VerbEnum.md) | | +**verb** | **str** | | **parameters** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/ConditionRequest.md b/generated/docs/ConditionRequest.md index a9a2cead..60a6d034 100644 --- a/generated/docs/ConditionRequest.md +++ b/generated/docs/ConditionRequest.md @@ -4,7 +4,7 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**verb** | [**VerbEnum**](VerbEnum.md) | | +**verb** | **str** | | **parameters** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/CountingResult.md b/generated/docs/CountingResult.md index f9db3d5d..deb8cbe6 100644 --- a/generated/docs/CountingResult.md +++ b/generated/docs/CountingResult.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **count** | **int, none_type** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] -**result_type** | **str** | | [optional] if omitted the server will use the default value of "counting" +**result_type** | **str** | | [optional] **from_edge** | **bool** | | [optional] **greater_than_max** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/Detector.md b/generated/docs/Detector.md index 133bfeba..b2f9f86e 100644 --- a/generated/docs/Detector.md +++ b/generated/docs/Detector.md @@ -12,12 +12,12 @@ Name | Type | Description | Notes **query** | **str** | A question about the image. | [readonly] **group_name** | **str** | Which group should this detector be part of? | [readonly] **metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | Metadata about the detector. | [readonly] -**mode** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] +**mode** | **str** | | [readonly] **mode_configuration** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [readonly] **confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. 
| [optional] if omitted the server will use the default value of 0.9 **patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 **status** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] -**escalation_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | [optional] +**escalation_type** | **str** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/DetectorsApi.md b/generated/docs/DetectorsApi.md index 51cc1d1c..9f5355e5 100644 --- a/generated/docs/DetectorsApi.md +++ b/generated/docs/DetectorsApi.md @@ -532,7 +532,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: confidence_threshold=0.9, patience_time=30.0, status=None, - escalation_type=None, + escalation_type="escalation_type_example", ) # PatchedDetectorRequest | (optional) # example passing only required values which don't have defaults set diff --git a/generated/docs/EscalationTypeEnum.md b/generated/docs/EscalationTypeEnum.md index 36e1e46e..d7842681 100644 --- a/generated/docs/EscalationTypeEnum.md +++ b/generated/docs/EscalationTypeEnum.md @@ -1,11 +1,10 @@ # EscalationTypeEnum -* `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | must be one of ["STANDARD", "NO_HUMAN_LABELING", ] +**value** | **str** | | must be one of ["STANDARD", "NO_HUMAN_LABELING", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/LabelValue.md b/generated/docs/LabelValue.md index acbb0e6f..da087687 100644 --- a/generated/docs/LabelValue.md +++ b/generated/docs/LabelValue.md @@ -6,10 +6,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **confidence** | **float, none_type** | | [readonly] **class_name** | **str, none_type** | Return a human-readable class name for this label (e.g. 
YES/NO) | [readonly] -**annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly] +**annotations_requested** | **[str]** | | [readonly] **created_at** | **datetime** | | [readonly] **detector_id** | **int, none_type** | | [readonly] -**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] +**source** | **str** | | [readonly] **text** | **str, none_type** | Text annotations | [readonly] **rois** | [**[ROI], none_type**](ROI.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/MultiClassificationResult.md b/generated/docs/MultiClassificationResult.md index d048a5da..04aad82f 100644 --- a/generated/docs/MultiClassificationResult.md +++ b/generated/docs/MultiClassificationResult.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **label** | **str** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] -**result_type** | **str** | | [optional] if omitted the server will use the default value of "multi_classification" +**result_type** | **str** | | [optional] **from_edge** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/PatchedDetectorRequest.md b/generated/docs/PatchedDetectorRequest.md index 819310b8..eb594561 100644 --- a/generated/docs/PatchedDetectorRequest.md +++ b/generated/docs/PatchedDetectorRequest.md @@ -9,7 +9,7 @@ Name | Type | Description | Notes **confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. 
| [optional] if omitted the server will use the default value of 0.9 **patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 **status** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] -**escalation_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | [optional] +**escalation_type** | **str** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/SourceEnum.md b/generated/docs/SourceEnum.md index 65fe757d..a382414e 100644 --- a/generated/docs/SourceEnum.md +++ b/generated/docs/SourceEnum.md @@ -1,11 +1,10 @@ # SourceEnum -* `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE | must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] +**value** | **str** | | must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/TextRecognitionResult.md b/generated/docs/TextRecognitionResult.md index f518a26b..736bec34 100644 --- a/generated/docs/TextRecognitionResult.md +++ b/generated/docs/TextRecognitionResult.md @@ -8,7 +8,7 @@ Name | Type | Description | Notes **truncated** | **bool** | | **confidence** | **float, none_type** | | [optional] **source** | **str** | | [optional] -**result_type** | **str** | | [optional] if omitted the server will use the default value of "text_recognition" +**result_type** | **str** | | [optional] **from_edge** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/VerbEnum.md b/generated/docs/VerbEnum.md index a76351de..57a334c2 100644 --- a/generated/docs/VerbEnum.md +++ b/generated/docs/VerbEnum.md @@ -1,11 +1,10 @@ # VerbEnum -* `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - 
ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES | must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] +**value** | **str** | | must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/groundlight_openapi_client/__init__.py b/generated/groundlight_openapi_client/__init__.py index 45ed51c2..ff1e9a1d 100644 --- a/generated/groundlight_openapi_client/__init__.py +++ b/generated/groundlight_openapi_client/__init__.py @@ -5,7 +5,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/actions_api.py b/generated/groundlight_openapi_client/api/actions_api.py index 0dbc94cb..4f9d42f5 100644 --- a/generated/groundlight_openapi_client/api/actions_api.py +++ b/generated/groundlight_openapi_client/api/actions_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_groups_api.py b/generated/groundlight_openapi_client/api/detector_groups_api.py index 91ada22a..83a2cb4f 100644 --- a/generated/groundlight_openapi_client/api/detector_groups_api.py +++ b/generated/groundlight_openapi_client/api/detector_groups_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_reset_api.py b/generated/groundlight_openapi_client/api/detector_reset_api.py index cc51c357..c50532f8 100644 --- a/generated/groundlight_openapi_client/api/detector_reset_api.py +++ b/generated/groundlight_openapi_client/api/detector_reset_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index 33a41d48..eca18f45 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. 
You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/edge_api.py b/generated/groundlight_openapi_client/api/edge_api.py index a0f2b212..a0b3187d 100644 --- a/generated/groundlight_openapi_client/api/edge_api.py +++ b/generated/groundlight_openapi_client/api/edge_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/image_queries_api.py b/generated/groundlight_openapi_client/api/image_queries_api.py index 8cb2d80e..03ad2600 100644 --- a/generated/groundlight_openapi_client/api/image_queries_api.py +++ b/generated/groundlight_openapi_client/api/image_queries_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/labels_api.py b/generated/groundlight_openapi_client/api/labels_api.py index 27d4c37f..fd948a9f 100644 --- a/generated/groundlight_openapi_client/api/labels_api.py +++ b/generated/groundlight_openapi_client/api/labels_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index 287cad2b..f1af2cb5 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/user_api.py b/generated/groundlight_openapi_client/api/user_api.py index 6e6481ea..a14200f2 100644 --- a/generated/groundlight_openapi_client/api/user_api.py +++ b/generated/groundlight_openapi_client/api/user_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api_client.py b/generated/groundlight_openapi_client/api_client.py index 2e8e4d56..da2e348a 100644 --- a/generated/groundlight_openapi_client/api_client.py +++ b/generated/groundlight_openapi_client/api_client.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/configuration.py b/generated/groundlight_openapi_client/configuration.py index c8c0a864..654b832f 100644 --- a/generated/groundlight_openapi_client/configuration.py +++ b/generated/groundlight_openapi_client/configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -419,7 +419,7 @@ def to_debug_report(self): "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" - "Version of the API: 0.18.3\n" + "Version of the API: 0.18.2\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) ) diff --git a/generated/groundlight_openapi_client/exceptions.py b/generated/groundlight_openapi_client/exceptions.py index c29a0632..393dbba8 100644 --- a/generated/groundlight_openapi_client/exceptions.py +++ b/generated/groundlight_openapi_client/exceptions.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action.py b/generated/groundlight_openapi_client/model/action.py index 139a4111..8308d44b 100644 --- a/generated/groundlight_openapi_client/model/action.py +++ b/generated/groundlight_openapi_client/model/action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action_list.py b/generated/groundlight_openapi_client/model/action_list.py index e2233883..a38fae04 100644 --- a/generated/groundlight_openapi_client/model/action_list.py +++ b/generated/groundlight_openapi_client/model/action_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/all_notes.py b/generated/groundlight_openapi_client/model/all_notes.py index 122e0fe8..89da5af1 100644 --- a/generated/groundlight_openapi_client/model/all_notes.py +++ b/generated/groundlight_openapi_client/model/all_notes.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/annotations_requested_enum.py b/generated/groundlight_openapi_client/model/annotations_requested_enum.py index 3773d934..1e1f6b8f 100644 --- a/generated/groundlight_openapi_client/model/annotations_requested_enum.py +++ b/generated/groundlight_openapi_client/model/annotations_requested_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -102,10 +102,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + args[0] (str):, must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 Keyword Args: - value (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + value (str):, must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -194,10 +194,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + args[0] (str):, must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 Keyword Args: - value (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + value (str):, must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
diff --git a/generated/groundlight_openapi_client/model/b_box_geometry.py b/generated/groundlight_openapi_client/model/b_box_geometry.py index 2e806115..2282dfd7 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry_request.py b/generated/groundlight_openapi_client/model/b_box_geometry_request.py index e1e8e73b..fcac579f 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry_request.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/binary_classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py index d34681f1..605685dc 100644 --- a/generated/groundlight_openapi_client/model/binary_classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,24 +53,7 @@ class BinaryClassificationResult(ModelNormal): as additional properties values. 
""" - allowed_values = { - ("label",): { - "YES": "YES", - "NO": "NO", - "UNCLEAR": "UNCLEAR", - }, - ("source",): { - "STILL_PROCESSING": "STILL_PROCESSING", - "CLOUD": "CLOUD", - "USER": "USER", - "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", - "ALGORITHM": "ALGORITHM", - "EDGE": "EDGE", - }, - ("result_type",): { - "BINARY_CLASSIFICATION": "binary_classification", - }, - } + allowed_values = {} validations = { ("confidence",): { @@ -177,7 +160,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "binary_classification" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ @@ -269,7 +252,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "binary_classification" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/blank_enum.py b/generated/groundlight_openapi_client/model/blank_enum.py index 31919708..aa466bb8 100644 --- a/generated/groundlight_openapi_client/model/blank_enum.py +++ b/generated/groundlight_openapi_client/model/blank_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py b/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py index f78803b0..bcf98f27 100644 --- a/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/bounding_box_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/bounding_box_result.py b/generated/groundlight_openapi_client/model/bounding_box_result.py index 2a80e3d2..be4daaba 100644 --- a/generated/groundlight_openapi_client/model/bounding_box_result.py +++ b/generated/groundlight_openapi_client/model/bounding_box_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,24 +53,7 @@ class BoundingBoxResult(ModelNormal): as additional properties values. 
""" - allowed_values = { - ("label",): { - "BOUNDING_BOX": "BOUNDING_BOX", - "GREATER_THAN_MAX": "GREATER_THAN_MAX", - "UNCLEAR": "UNCLEAR", - }, - ("source",): { - "STILL_PROCESSING": "STILL_PROCESSING", - "CLOUD": "CLOUD", - "USER": "USER", - "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", - "ALGORITHM": "ALGORITHM", - "EDGE": "EDGE", - }, - ("result_type",): { - "BOUNDING_BOX": "bounding_box", - }, - } + allowed_values = {} validations = { ("confidence",): { @@ -177,7 +160,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "bounding_box" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ @@ -269,7 +252,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "bounding_box" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/channel_enum.py b/generated/groundlight_openapi_client/model/channel_enum.py index 2ad2285d..720dac0d 100644 --- a/generated/groundlight_openapi_client/model/channel_enum.py +++ b/generated/groundlight_openapi_client/model/channel_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition.py b/generated/groundlight_openapi_client/model/condition.py index abdc7c4a..65a2861f 100644 --- a/generated/groundlight_openapi_client/model/condition.py +++ b/generated/groundlight_openapi_client/model/condition.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,12 +29,6 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -def lazy_import(): - from groundlight_openapi_client.model.verb_enum import VerbEnum - - globals()["VerbEnum"] = VerbEnum - - class Condition(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -69,7 +63,6 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ - lazy_import() return ( bool, date, @@ -94,9 +87,8 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. 
""" - lazy_import() return { - "verb": (VerbEnum,), # noqa: E501 + "verb": (str,), # noqa: E501 "parameters": ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501 } @@ -119,7 +111,7 @@ def _from_openapi_data(cls, verb, parameters, *args, **kwargs): # noqa: E501 """Condition - a model defined in OpenAPI Args: - verb (VerbEnum): + verb (str): parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Keyword Args: @@ -209,7 +201,7 @@ def __init__(self, verb, parameters, *args, **kwargs): # noqa: E501 """Condition - a model defined in OpenAPI Args: - verb (VerbEnum): + verb (str): parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Keyword Args: diff --git a/generated/groundlight_openapi_client/model/condition_request.py b/generated/groundlight_openapi_client/model/condition_request.py index dadfd994..674578a6 100644 --- a/generated/groundlight_openapi_client/model/condition_request.py +++ b/generated/groundlight_openapi_client/model/condition_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,12 +29,6 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -def lazy_import(): - from groundlight_openapi_client.model.verb_enum import VerbEnum - - globals()["VerbEnum"] = VerbEnum - - class ConditionRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -69,7 +63,6 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ - lazy_import() return ( bool, date, @@ -94,9 +87,8 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ - lazy_import() return { - "verb": (VerbEnum,), # noqa: E501 + "verb": (str,), # noqa: E501 "parameters": ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501 } @@ -119,7 +111,7 @@ def _from_openapi_data(cls, verb, parameters, *args, **kwargs): # noqa: E501 """ConditionRequest - a model defined in OpenAPI Args: - verb (VerbEnum): + verb (str): parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Keyword Args: @@ -209,7 +201,7 @@ def __init__(self, verb, parameters, *args, **kwargs): # noqa: E501 """ConditionRequest - a model defined in OpenAPI Args: - verb (VerbEnum): + verb (str): parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Keyword Args: diff --git a/generated/groundlight_openapi_client/model/count_mode_configuration.py b/generated/groundlight_openapi_client/model/count_mode_configuration.py index 0068e9e9..9c2d3be9 100644 --- a/generated/groundlight_openapi_client/model/count_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/count_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/counting_result.py b/generated/groundlight_openapi_client/model/counting_result.py index 23266160..08d2f6db 100644 --- a/generated/groundlight_openapi_client/model/counting_result.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,19 +53,7 @@ class CountingResult(ModelNormal): as additional properties values. """ - allowed_values = { - ("source",): { - "STILL_PROCESSING": "STILL_PROCESSING", - "CLOUD": "CLOUD", - "USER": "USER", - "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", - "ALGORITHM": "ALGORITHM", - "EDGE": "EDGE", - }, - ("result_type",): { - "COUNTING": "counting", - }, - } + allowed_values = {} validations = { ("count",): { @@ -180,7 +168,7 @@ def _from_openapi_data(cls, count, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "counting" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ @@ -273,7 +261,7 @@ def __init__(self, count, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "counting" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index 99d4c04d..2cde7991 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -32,14 +32,10 @@ def lazy_import(): from groundlight_openapi_client.model.blank_enum import BlankEnum from groundlight_openapi_client.model.detector_type_enum import DetectorTypeEnum - from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum - from groundlight_openapi_client.model.mode_enum import ModeEnum from groundlight_openapi_client.model.status_enum import StatusEnum globals()["BlankEnum"] = BlankEnum globals()["DetectorTypeEnum"] = DetectorTypeEnum - globals()["EscalationTypeEnum"] = EscalationTypeEnum - globals()["ModeEnum"] = ModeEnum globals()["StatusEnum"] = StatusEnum @@ -136,17 +132,7 @@ def openapi_types(): {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, ), # noqa: E501 - "mode": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 + "mode": (str,), # noqa: E501 "mode_configuration": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, @@ -164,17 +150,7 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "escalation_type": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 + "escalation_type": (str,), # noqa: E501 } @cached_property @@ -225,7 +201,7 @@ def _from_openapi_data( query (str): A question about the image. group_name (str): Which group should this detector be part of? metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Metadata about the detector. - mode (bool, date, datetime, dict, float, int, list, str, none_type): + mode (str): mode_configuration ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Keyword Args: @@ -262,7 +238,7 @@ def _from_openapi_data( confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + escalation_type (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -360,7 +336,7 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. 
[optional] if omitted the server will use the default value of 30.0 # noqa: E501 status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + escalation_type (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/detector_creation_input_request.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py index dc8f5011..ca1b1563 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input_request.py +++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group.py b/generated/groundlight_openapi_client/model/detector_group.py index 8ada556e..5620b0c9 100644 --- a/generated/groundlight_openapi_client/model/detector_group.py +++ b/generated/groundlight_openapi_client/model/detector_group.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group_request.py b/generated/groundlight_openapi_client/model/detector_group_request.py index 95131dd1..3302860f 100644 --- a/generated/groundlight_openapi_client/model/detector_group_request.py +++ b/generated/groundlight_openapi_client/model/detector_group_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_type_enum.py b/generated/groundlight_openapi_client/model/detector_type_enum.py index 8b9a770d..94d446da 100644 --- a/generated/groundlight_openapi_client/model/detector_type_enum.py +++ b/generated/groundlight_openapi_client/model/detector_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/edge_model_info.py b/generated/groundlight_openapi_client/model/edge_model_info.py index 635ee58c..66826368 100644 --- a/generated/groundlight_openapi_client/model/edge_model_info.py +++ b/generated/groundlight_openapi_client/model/edge_model_info.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/escalation_type_enum.py b/generated/groundlight_openapi_client/model/escalation_type_enum.py index 0a9335dc..2b80360e 100644 --- a/generated/groundlight_openapi_client/model/escalation_type_enum.py +++ b/generated/groundlight_openapi_client/model/escalation_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -102,10 +102,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + args[0] (str):, must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 Keyword Args: - value (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + value (str):, must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -194,10 +194,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + args[0] (str):, must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 Keyword Args: - value (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + value (str):, must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index 80b967b3..79cf0179 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query_type_enum.py b/generated/groundlight_openapi_client/model/image_query_type_enum.py index f1c2367a..42470730 100644 --- a/generated/groundlight_openapi_client/model/image_query_type_enum.py +++ b/generated/groundlight_openapi_client/model/image_query_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200.py b/generated/groundlight_openapi_client/model/inline_response200.py index 2a04fb2b..62e2494f 100644 --- a/generated/groundlight_openapi_client/model/inline_response200.py +++ b/generated/groundlight_openapi_client/model/inline_response200.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response2001.py b/generated/groundlight_openapi_client/model/inline_response2001.py index 45c049c5..cdb30032 100644 --- a/generated/groundlight_openapi_client/model/inline_response2001.py +++ b/generated/groundlight_openapi_client/model/inline_response2001.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py index bd9ce9cc..e3221b58 100644 --- a/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py +++ b/generated/groundlight_openapi_client/model/inline_response2001_evaluation_results.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response2002.py b/generated/groundlight_openapi_client/model/inline_response2002.py index cc2ddd6c..1fbfd51d 100644 --- a/generated/groundlight_openapi_client/model/inline_response2002.py +++ b/generated/groundlight_openapi_client/model/inline_response2002.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200_summary.py b/generated/groundlight_openapi_client/model/inline_response200_summary.py index 278368a6..d5c30a12 100644 --- a/generated/groundlight_openapi_client/model/inline_response200_summary.py +++ b/generated/groundlight_openapi_client/model/inline_response200_summary.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py b/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py index e30fffbf..b2eb180b 100644 --- a/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py +++ b/generated/groundlight_openapi_client/model/inline_response200_summary_class_counts.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/label_value.py b/generated/groundlight_openapi_client/model/label_value.py index 8361b9a2..ee9b73e9 100644 --- a/generated/groundlight_openapi_client/model/label_value.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,13 +30,9 @@ def lazy_import(): - from groundlight_openapi_client.model.annotations_requested_enum import AnnotationsRequestedEnum from groundlight_openapi_client.model.roi import ROI - from groundlight_openapi_client.model.source_enum import SourceEnum - globals()["AnnotationsRequestedEnum"] = AnnotationsRequestedEnum globals()["ROI"] = ROI - globals()["SourceEnum"] = SourceEnum class LabelValue(ModelNormal): @@ -108,23 +104,13 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "annotations_requested": ([bool, date, datetime, dict, float, int, list, str, none_type],), # noqa: E501 + "annotations_requested": ([str],), # noqa: E501 "created_at": (datetime,), # noqa: E501 "detector_id": ( int, none_type, ), # noqa: E501 - "source": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 + "source": (str,), # noqa: E501 "text": ( str, none_type, @@ -172,10 +158,10 @@ def _from_openapi_data( Args: confidence (float, none_type): class_name (str, none_type): Return a human-readable class name for this label (e.g. 
YES/NO) - annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]): + annotations_requested ([str]): created_at (datetime): detector_id (int, none_type): - source (bool, date, datetime, dict, float, int, list, str, none_type): + source (str): text (str, none_type): Text annotations Keyword Args: diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py index a940f5a1..434f8ad8 100644 --- a/generated/groundlight_openapi_client/model/label_value_request.py +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py index 6c477520..b35a3833 100644 --- a/generated/groundlight_openapi_client/model/mode_enum.py +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py index 8a47db85..f08ee27c 100644 --- a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_classification_result.py b/generated/groundlight_openapi_client/model/multi_classification_result.py index e2b1d822..798c1615 100644 --- a/generated/groundlight_openapi_client/model/multi_classification_result.py +++ b/generated/groundlight_openapi_client/model/multi_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,19 +53,7 @@ class MultiClassificationResult(ModelNormal): as additional properties values. 
""" - allowed_values = { - ("source",): { - "STILL_PROCESSING": "STILL_PROCESSING", - "CLOUD": "CLOUD", - "USER": "USER", - "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", - "ALGORITHM": "ALGORITHM", - "EDGE": "EDGE", - }, - ("result_type",): { - "MULTI_CLASSIFICATION": "multi_classification", - }, - } + allowed_values = {} validations = { ("confidence",): { @@ -172,7 +160,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "multi_classification" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ @@ -264,7 +252,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "multi_classification" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/note.py b/generated/groundlight_openapi_client/model/note.py index 6fdffe42..64f139a7 100644 --- a/generated/groundlight_openapi_client/model/note.py +++ b/generated/groundlight_openapi_client/model/note.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/note_request.py b/generated/groundlight_openapi_client/model/note_request.py index 0df28f64..95889010 100644 --- a/generated/groundlight_openapi_client/model/note_request.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_detector_list.py b/generated/groundlight_openapi_client/model/paginated_detector_list.py index 8a657572..5972205e 100644 --- a/generated/groundlight_openapi_client/model/paginated_detector_list.py +++ b/generated/groundlight_openapi_client/model/paginated_detector_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_image_query_list.py b/generated/groundlight_openapi_client/model/paginated_image_query_list.py index c8a91fab..13dccab7 100644 --- a/generated/groundlight_openapi_client/model/paginated_image_query_list.py +++ b/generated/groundlight_openapi_client/model/paginated_image_query_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_rule_list.py b/generated/groundlight_openapi_client/model/paginated_rule_list.py index 348b454d..6d007e1d 100644 --- a/generated/groundlight_openapi_client/model/paginated_rule_list.py +++ b/generated/groundlight_openapi_client/model/paginated_rule_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index b6a81c4d..7e7a5813 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -31,11 +31,9 @@ def lazy_import(): from groundlight_openapi_client.model.blank_enum import BlankEnum - from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.status_enum import StatusEnum globals()["BlankEnum"] = BlankEnum - globals()["EscalationTypeEnum"] = EscalationTypeEnum globals()["StatusEnum"] = StatusEnum @@ -78,6 +76,9 @@ class PatchedDetectorRequest(ModelNormal): "inclusive_maximum": 3600, "inclusive_minimum": 0, }, + ("escalation_type",): { + "min_length": 1, + }, } @cached_property @@ -127,17 +128,7 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "escalation_type": ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ), # noqa: E501 + "escalation_type": (str,), # noqa: E501 } @cached_property @@ -196,7 +187,7 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. 
[optional] if omitted the server will use the default value of 30.0 # noqa: E501 status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + escalation_type (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -285,7 +276,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + escalation_type (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/payload_template.py b/generated/groundlight_openapi_client/model/payload_template.py index c4bae06c..a721bb4c 100644 --- a/generated/groundlight_openapi_client/model/payload_template.py +++ b/generated/groundlight_openapi_client/model/payload_template.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/payload_template_request.py b/generated/groundlight_openapi_client/model/payload_template_request.py index adc9050f..3a0f12a2 100644 --- a/generated/groundlight_openapi_client/model/payload_template_request.py +++ b/generated/groundlight_openapi_client/model/payload_template_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/result_type_enum.py b/generated/groundlight_openapi_client/model/result_type_enum.py index 5c1f2c7c..c4b954fd 100644 --- a/generated/groundlight_openapi_client/model/result_type_enum.py +++ b/generated/groundlight_openapi_client/model/result_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi.py b/generated/groundlight_openapi_client/model/roi.py index 443d0c07..74c4fc66 100644 --- a/generated/groundlight_openapi_client/model/roi.py +++ b/generated/groundlight_openapi_client/model/roi.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi_request.py b/generated/groundlight_openapi_client/model/roi_request.py index 366c673b..07fb54e2 100644 --- a/generated/groundlight_openapi_client/model/roi_request.py +++ b/generated/groundlight_openapi_client/model/roi_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule.py b/generated/groundlight_openapi_client/model/rule.py index 874ab7f3..7f1be14a 100644 --- a/generated/groundlight_openapi_client/model/rule.py +++ b/generated/groundlight_openapi_client/model/rule.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule_request.py b/generated/groundlight_openapi_client/model/rule_request.py index 9fb7ba11..8fdf8fe1 100644 --- a/generated/groundlight_openapi_client/model/rule_request.py +++ b/generated/groundlight_openapi_client/model/rule_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py index 63eb1c41..f5586bb6 100644 --- a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py +++ b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py index 9ad77b6c..8a2746ab 100644 --- a/generated/groundlight_openapi_client/model/source_enum.py +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -108,10 +108,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 + args[0] (str):, must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 Keyword Args: - value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 + value (str):, must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -200,10 +200,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. 
Args: - args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 + args[0] (str):, must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 Keyword Args: - value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear * `EDGE` - EDGE., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 + value (str):, must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", "EDGE", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. diff --git a/generated/groundlight_openapi_client/model/status_enum.py b/generated/groundlight_openapi_client/model/status_enum.py index 7d697180..b41c2871 100644 --- a/generated/groundlight_openapi_client/model/status_enum.py +++ b/generated/groundlight_openapi_client/model/status_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/text_mode_configuration.py b/generated/groundlight_openapi_client/model/text_mode_configuration.py index 67897d83..ee4c4e73 100644 --- a/generated/groundlight_openapi_client/model/text_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/text_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/text_recognition_result.py b/generated/groundlight_openapi_client/model/text_recognition_result.py index de07c128..e0937650 100644 --- a/generated/groundlight_openapi_client/model/text_recognition_result.py +++ b/generated/groundlight_openapi_client/model/text_recognition_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,19 +53,7 @@ class TextRecognitionResult(ModelNormal): as additional properties values. 
""" - allowed_values = { - ("source",): { - "STILL_PROCESSING": "STILL_PROCESSING", - "CLOUD": "CLOUD", - "USER": "USER", - "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", - "ALGORITHM": "ALGORITHM", - "EDGE": "EDGE", - }, - ("result_type",): { - "TEXT_RECOGNITION": "text_recognition", - }, - } + allowed_values = {} validations = { ("confidence",): { @@ -178,7 +166,7 @@ def _from_openapi_data(cls, text, truncated, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "text_recognition" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ @@ -272,7 +260,7 @@ def __init__(self, text, truncated, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence (float, none_type): [optional] # noqa: E501 source (str): [optional] # noqa: E501 - result_type (str): [optional] if omitted the server will use the default value of "text_recognition" # noqa: E501 + result_type (str): [optional] # noqa: E501 from_edge (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/verb_enum.py b/generated/groundlight_openapi_client/model/verb_enum.py index e3f77857..789dd77c 100644 --- a/generated/groundlight_openapi_client/model/verb_enum.py +++ b/generated/groundlight_openapi_client/model/verb_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -105,10 +105,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + args[0] (str):, must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 Keyword Args: - value (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + value (str):, must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -197,10 +197,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. 
Args: - args[0] (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + args[0] (str):, must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 Keyword Args: - value (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + value (str):, must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. diff --git a/generated/groundlight_openapi_client/model/webhook_action.py b/generated/groundlight_openapi_client/model/webhook_action.py index 8bd4fc73..33f70f3c 100644 --- a/generated/groundlight_openapi_client/model/webhook_action.py +++ b/generated/groundlight_openapi_client/model/webhook_action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/webhook_action_request.py b/generated/groundlight_openapi_client/model/webhook_action_request.py index 1cf055f8..cd98a99b 100644 --- a/generated/groundlight_openapi_client/model/webhook_action_request.py +++ b/generated/groundlight_openapi_client/model/webhook_action_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model_utils.py b/generated/groundlight_openapi_client/model_utils.py index 660d902a..cf7bd6d4 100644 --- a/generated/groundlight_openapi_client/model_utils.py +++ b/generated/groundlight_openapi_client/model_utils.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/rest.py b/generated/groundlight_openapi_client/rest.py index 14aa376b..16d8ca86 100644 --- a/generated/groundlight_openapi_client/rest.py +++ b/generated/groundlight_openapi_client/rest.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/model.py b/generated/model.py index 642ea9e8..bf04bc1a 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-04-02T04:12:45+00:00 +# timestamp: 2025-04-02T20:50:30+00:00 from __future__ import annotations @@ -11,16 +11,6 @@ from pydantic import AnyUrl, BaseModel, Field, RootModel, confloat, conint, constr -class AnnotationsRequestedEnum(str, Enum): - """ - * `BINARY_CLASSIFICATION` - Binary Classification - * `BOUNDING_BOXES` - Bounding Boxes - """ - - BINARY_CLASSIFICATION = "BINARY_CLASSIFICATION" - BOUNDING_BOXES = "BOUNDING_BOXES" - - class BBoxGeometry(BaseModel): """ Mixin for serializers to handle data in the StrictBaseModel format @@ -49,6 +39,16 @@ class BlankEnum(Enum): field_ = "" +class Condition(BaseModel): + verb: str + parameters: Dict[str, Any] + + +class ConditionRequest(BaseModel): + verb: str + parameters: Dict[str, Any] + + class DetectorGroup(BaseModel): id: str name: constr(max_length=100) @@ -76,16 +76,6 @@ class EdgeModelInfo(BaseModel): predictor_metadata: Optional[Any] = None -class EscalationTypeEnum(str, Enum): - """ - * `STANDARD` - STANDARD - * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING - """ - - STANDARD = "STANDARD" - NO_HUMAN_LABELING = "NO_HUMAN_LABELING" - - class ImageQueryTypeEnum(str, Enum): image_query = "image_query" @@ -161,28 +151,6 @@ class SnoozeTimeUnitEnum(str, Enum): SECONDS = "SECONDS" -class SourceEnum(str, Enum): - """ - * `INITIAL_PLACEHOLDER` - InitialPlaceholder - * `CLOUD` - HumanCloud - * `CUST` - HumanCustomer - * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble - * `ALG` - Algorithm - * `ALG_REC` - AlgorithmReconciled - * `ALG_UNCLEAR` - AlgorithmUnclear - * `EDGE` - EDGE - """ - - INITIAL_PLACEHOLDER = "INITIAL_PLACEHOLDER" - CLOUD = "CLOUD" - CUST = "CUST" - HUMAN_CLOUD_ENSEMBLE = "HUMAN_CLOUD_ENSEMBLE" - ALG = "ALG" - ALG_REC = "ALG_REC" - ALG_UNCLEAR = "ALG_UNCLEAR" - EDGE = "EDGE" - - class StatusEnum(str, Enum): """ * `ON` - ON @@ -193,22 +161,6 @@ class StatusEnum(str, Enum): OFF = "OFF" -class VerbEnum(str, Enum): - """ - * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY - * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME - * `CHANGED_TO` - CHANGED_TO - * `NO_CHANGE` - NO_CHANGE - * `NO_QUERIES` - NO_QUERIES - """ - - ANSWERED_CONSECUTIVELY = "ANSWERED_CONSECUTIVELY" - ANSWERED_WITHIN_TIME = "ANSWERED_WITHIN_TIME" - CHANGED_TO = "CHANGED_TO" - NO_CHANGE = "NO_CHANGE" - NO_QUERIES = "NO_QUERIES" - - class WebhookAction(BaseModel): url: AnyUrl include_image: Optional[bool] = None @@ -227,87 +179,46 @@ class WebhookActionRequest(BaseModel): last_failed_at: Optional[datetime] = None -class Source(str, Enum): - STILL_PROCESSING = "STILL_PROCESSING" - CLOUD = "CLOUD" - USER = "USER" - CLOUD_ENSEMBLE = "CLOUD_ENSEMBLE" - ALGORITHM = "ALGORITHM" - EDGE = "EDGE" - - -class ResultType(str, Enum): - binary_classification = "binary_classification" - - -class Label(str, Enum): - YES = "YES" - NO = "NO" - UNCLEAR = "UNCLEAR" - - class BinaryClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = None - result_type: Optional[ResultType] = None + source: Optional[str] = None + result_type: Optional[str] = None from_edge: Optional[bool] = None - label: Label - - -class ResultType2(str, 
Enum): - counting = "counting" + label: str class CountingResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = None - result_type: Optional[ResultType2] = None + source: Optional[str] = None + result_type: Optional[str] = None from_edge: Optional[bool] = None count: Optional[conint(ge=0)] = Field(...) greater_than_max: Optional[bool] = None -class ResultType3(str, Enum): - multi_classification = "multi_classification" - - class MultiClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = None - result_type: Optional[ResultType3] = None + source: Optional[str] = None + result_type: Optional[str] = None from_edge: Optional[bool] = None label: str -class ResultType4(str, Enum): - text_recognition = "text_recognition" - - class TextRecognitionResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = None - result_type: Optional[ResultType4] = None + source: Optional[str] = None + result_type: Optional[str] = None from_edge: Optional[bool] = None text: Optional[str] = Field(...) truncated: bool -class ResultType5(str, Enum): - bounding_box = "bounding_box" - - -class Label1(str, Enum): - BOUNDING_BOX = "BOUNDING_BOX" - GREATER_THAN_MAX = "GREATER_THAN_MAX" - UNCLEAR = "UNCLEAR" - - class BoundingBoxResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = None - result_type: Optional[ResultType5] = None + source: Optional[str] = None + result_type: Optional[str] = None from_edge: Optional[bool] = None - label: Label1 + label: str class CountModeConfiguration(BaseModel): @@ -344,6 +255,35 @@ class ActionList(RootModel[List[Action]]): root: List[Action] +class AnnotationsRequestedEnum(str, Enum): + BINARY_CLASSIFICATION = "BINARY_CLASSIFICATION" + BOUNDING_BOXES = "BOUNDING_BOXES" + + +class EscalationTypeEnum(str, Enum): + STANDARD = "STANDARD" + NO_HUMAN_LABELING = "NO_HUMAN_LABELING" + + +class SourceEnum(str, Enum): + INITIAL_PLACEHOLDER = "INITIAL_PLACEHOLDER" + CLOUD = "CLOUD" + CUST = "CUST" + HUMAN_CLOUD_ENSEMBLE = "HUMAN_CLOUD_ENSEMBLE" + ALG = "ALG" + ALG_REC = "ALG_REC" + ALG_UNCLEAR = "ALG_UNCLEAR" + EDGE = "EDGE" + + +class VerbEnum(str, Enum): + ANSWERED_CONSECUTIVELY = "ANSWERED_CONSECUTIVELY" + ANSWERED_WITHIN_TIME = "ANSWERED_WITHIN_TIME" + CHANGED_TO = "CHANGED_TO" + NO_CHANGE = "NO_CHANGE" + NO_QUERIES = "NO_QUERIES" + + class AllNotes(BaseModel): """ Serializes all notes for a given detector, grouped by type as listed in UserProfile.NoteCategoryChoices @@ -354,16 +294,6 @@ class AllNotes(BaseModel): GL: List[Note] -class Condition(BaseModel): - verb: VerbEnum - parameters: Dict[str, Any] - - -class ConditionRequest(BaseModel): - verb: VerbEnum - parameters: Dict[str, Any] - - class Detector(BaseModel): """ Groundlight Detectors provide answers to natural language questions about images. @@ -389,16 +319,10 @@ class Detector(BaseModel): 30.0, description="How long Groundlight will attempt to generate a confident prediction" ) metadata: Optional[Dict[str, Any]] = Field(..., description="Metadata about the detector.") - mode: ModeEnum + mode: str mode_configuration: Optional[Dict[str, Any]] = Field(...) 
status: Optional[Union[StatusEnum, BlankEnum]] = None - escalation_type: Optional[EscalationTypeEnum] = Field( - None, - description=( - "Category that define internal proccess for labeling image queries\n\n* `STANDARD` - STANDARD\n*" - " `NO_HUMAN_LABELING` - NO_HUMAN_LABELING" - ), - ) + escalation_type: Optional[str] = None class DetectorCreationInputRequest(BaseModel): @@ -483,10 +407,10 @@ class LabelValue(BaseModel): ..., description="Return a human-readable class name for this label (e.g. YES/NO)" ) rois: Optional[List[ROI]] = None - annotations_requested: List[AnnotationsRequestedEnum] + annotations_requested: List[str] created_at: datetime detector_id: Optional[int] = Field(...) - source: SourceEnum + source: str text: Optional[str] = Field(..., description="Text annotations") @@ -532,13 +456,7 @@ class PatchedDetectorRequest(BaseModel): 30.0, description="How long Groundlight will attempt to generate a confident prediction" ) status: Optional[Union[StatusEnum, BlankEnum]] = None - escalation_type: Optional[EscalationTypeEnum] = Field( - None, - description=( - "Category that define internal proccess for labeling image queries\n\n* `STANDARD` - STANDARD\n*" - " `NO_HUMAN_LABELING` - NO_HUMAN_LABELING" - ), - ) + escalation_type: Optional[constr(min_length=1)] = None class Rule(BaseModel): diff --git a/generated/setup.py b/generated/setup.py index 96e1f78a..9c4bb572 100644 --- a/generated/setup.py +++ b/generated/setup.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.3 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/spec/public-api.yaml b/spec/public-api.yaml index e14768b7..528d7cc2 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: Groundlight API - version: 0.18.3 + version: 0.18.2 description: Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. @@ -761,14 +761,6 @@ components: required: - CUSTOMER - GL - AnnotationsRequestedEnum: - enum: - - BINARY_CLASSIFICATION - - BOUNDING_BOXES - type: string - description: |- - * `BINARY_CLASSIFICATION` - Binary Classification - * `BOUNDING_BOXES` - Bounding Boxes BBoxGeometry: type: object description: Mixin for serializers to handle data in the StrictBaseModel format @@ -828,7 +820,7 @@ components: type: object properties: verb: - $ref: '#/components/schemas/VerbEnum' + type: string parameters: type: object additionalProperties: {} @@ -839,7 +831,7 @@ components: type: object properties: verb: - $ref: '#/components/schemas/VerbEnum' + type: string parameters: type: object additionalProperties: {} @@ -903,8 +895,7 @@ components: readOnly: true description: Metadata about the detector. 
mode: - allOf: - - $ref: '#/components/schemas/ModeEnum' + type: string readOnly: true mode_configuration: type: object @@ -916,13 +907,7 @@ components: - $ref: '#/components/schemas/StatusEnum' - $ref: '#/components/schemas/BlankEnum' escalation_type: - allOf: - - $ref: '#/components/schemas/EscalationTypeEnum' - description: |- - Category that define internal proccess for labeling image queries - - * `STANDARD` - STANDARD - * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING + type: string required: - created_at - group_name @@ -1045,14 +1030,6 @@ components: pipeline_config: {} oodd_pipeline_config: {} predictor_metadata: {} - EscalationTypeEnum: - enum: - - STANDARD - - NO_HUMAN_LABELING - type: string - description: |- - * `STANDARD` - STANDARD - * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING ImageQuery: type: object description: ImageQuery objects are the answers to natural language questions @@ -1166,13 +1143,7 @@ components: annotations_requested: type: array items: - allOf: - - $ref: '#/components/schemas/AnnotationsRequestedEnum' - description: |- - The type of annotation requested - - * `BINARY_CLASSIFICATION` - Binary Classification - * `BOUNDING_BOXES` - Bounding Boxes + type: string readOnly: true created_at: type: string @@ -1183,8 +1154,7 @@ components: nullable: true readOnly: true source: - allOf: - - $ref: '#/components/schemas/SourceEnum' + type: string readOnly: true text: type: string @@ -1219,13 +1189,13 @@ components: - image_query_id - label ModeEnum: + type: string enum: - BINARY - COUNT - MULTI_CLASS - TEXT - BOUNDING_BOX - type: string Note: type: object properties: @@ -1359,13 +1329,8 @@ components: - $ref: '#/components/schemas/StatusEnum' - $ref: '#/components/schemas/BlankEnum' escalation_type: - allOf: - - $ref: '#/components/schemas/EscalationTypeEnum' - description: |- - Category that define internal proccess for labeling image queries - - * `STANDARD` - STANDARD - * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING + type: string + minLength: 1 x-internal: true PayloadTemplate: type: object @@ -1529,26 +1494,6 @@ components: * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS - SourceEnum: - enum: - - INITIAL_PLACEHOLDER - - CLOUD - - CUST - - HUMAN_CLOUD_ENSEMBLE - - ALG - - ALG_REC - - ALG_UNCLEAR - - EDGE - type: string - description: |- - * `INITIAL_PLACEHOLDER` - InitialPlaceholder - * `CLOUD` - HumanCloud - * `CUST` - HumanCustomer - * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble - * `ALG` - Algorithm - * `ALG_REC` - AlgorithmReconciled - * `ALG_UNCLEAR` - AlgorithmUnclear - * `EDGE` - EDGE StatusEnum: enum: - 'ON' @@ -1557,20 +1502,6 @@ components: description: |- * `ON` - ON * `OFF` - OFF - VerbEnum: - enum: - - ANSWERED_CONSECUTIVELY - - ANSWERED_WITHIN_TIME - - CHANGED_TO - - NO_CHANGE - - NO_QUERIES - type: string - description: |- - * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY - * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME - * `CHANGED_TO` - CHANGED_TO - * `NO_CHANGE` - NO_CHANGE - * `NO_QUERIES` - NO_QUERIES WebhookAction: type: object properties: @@ -1631,25 +1562,12 @@ components: nullable: true source: type: string - enum: - - STILL_PROCESSING - - CLOUD - - USER - - CLOUD_ENSEMBLE - - ALGORITHM - - EDGE result_type: type: string - enum: - - binary_classification from_edge: type: boolean label: type: string - enum: - - 'YES' - - 'NO' - - UNCLEAR required: - label CountingResult: @@ -1663,17 +1581,8 @@ components: nullable: true source: type: string - enum: - - STILL_PROCESSING - - CLOUD - - USER - - CLOUD_ENSEMBLE - - ALGORITHM - - EDGE 
result_type: type: string - enum: - - counting from_edge: type: boolean count: @@ -1695,17 +1604,8 @@ components: nullable: true source: type: string - enum: - - STILL_PROCESSING - - CLOUD - - USER - - CLOUD_ENSEMBLE - - ALGORITHM - - EDGE result_type: type: string - enum: - - multi_classification from_edge: type: boolean label: @@ -1724,17 +1624,8 @@ components: nullable: true source: type: string - enum: - - STILL_PROCESSING - - CLOUD - - USER - - CLOUD_ENSEMBLE - - ALGORITHM - - EDGE result_type: type: string - enum: - - text_recognition from_edge: type: boolean text: @@ -1756,25 +1647,12 @@ components: nullable: true source: type: string - enum: - - STILL_PROCESSING - - CLOUD - - USER - - CLOUD_ENSEMBLE - - ALGORITHM - - EDGE result_type: type: string - enum: - - bounding_box from_edge: type: boolean label: type: string - enum: - - BOUNDING_BOX - - GREATER_THAN_MAX - - UNCLEAR required: - label CountModeConfiguration: @@ -1846,6 +1724,35 @@ components: type: array items: $ref: '#/components/schemas/Action' + AnnotationsRequestedEnum: + type: string + enum: + - BINARY_CLASSIFICATION + - BOUNDING_BOXES + EscalationTypeEnum: + type: string + enum: + - STANDARD + - NO_HUMAN_LABELING + SourceEnum: + type: string + enum: + - INITIAL_PLACEHOLDER + - CLOUD + - CUST + - HUMAN_CLOUD_ENSEMBLE + - ALG + - ALG_REC + - ALG_UNCLEAR + - EDGE + VerbEnum: + type: string + enum: + - ANSWERED_CONSECUTIVELY + - ANSWERED_WITHIN_TIME + - CHANGED_TO + - NO_CHANGE + - NO_QUERIES securitySchemes: ApiToken: name: x-api-token @@ -1859,4 +1766,4 @@ servers: - url: https://device.positronix.ai/device-api description: Device Prod - url: https://device.integ.positronix.ai/device-api - description: Device Integ \ No newline at end of file + description: Device Integ From 42f29f6c5d5618cc55c9602757f20e783d19a1c4 Mon Sep 17 00:00:00 2001 From: paulina-positronix Date: Wed, 2 Apr 2025 21:55:56 +0000 Subject: [PATCH 04/13] Temporary changes to remove use of missing Label, Source to unblock while we chase the error --- src/groundlight/binary_labels.py | 9 ++++++++- src/groundlight/internalapi.py | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py index 557a4245..cfa804f8 100644 --- a/src/groundlight/binary_labels.py +++ b/src/groundlight/binary_labels.py @@ -8,11 +8,18 @@ from enum import Enum from typing import Union -from model import Detector, ImageQuery, Label +from model import Detector, ImageQuery #, Label # temporarily commented out logger = logging.getLogger(__name__) +class Label(str, Enum): + """Temporarily added here while we chase the bug """ + YES = "YES" + NO = "NO" + UNCLEAR = "UNCLEAR" + + VALID_DISPLAY_LABELS = {Label.YES, Label.NO, Label.UNCLEAR} diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py index 10e1791d..c5325199 100644 --- a/src/groundlight/internalapi.py +++ b/src/groundlight/internalapi.py @@ -12,7 +12,7 @@ import requests from groundlight_openapi_client.api_client import ApiClient, ApiException -from model import Detector, ImageQuery, Source +from model import Detector, ImageQuery # , Source # temporarily commented out from groundlight.status_codes import is_ok from groundlight.version import get_version @@ -76,7 +76,7 @@ def iq_is_answered(iq: ImageQuery) -> bool: """ if not iq.result or not iq.result.source: return False - if (iq.result.source == Source.STILL_PROCESSING) or (iq.result.source is None): # Should never be None + if (iq.result.source == "STILL_PROCESSING") or 
(iq.result.source is None): # Should never be None return False return True From 522bcb94b707deec17e2767a25f5d519adb7126d Mon Sep 17 00:00:00 2001 From: Auto-format Bot Date: Wed, 2 Apr 2025 21:56:41 +0000 Subject: [PATCH 05/13] Automatically reformatting code --- src/groundlight/binary_labels.py | 5 +++-- src/groundlight/internalapi.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py index cfa804f8..a9e15787 100644 --- a/src/groundlight/binary_labels.py +++ b/src/groundlight/binary_labels.py @@ -8,13 +8,14 @@ from enum import Enum from typing import Union -from model import Detector, ImageQuery #, Label # temporarily commented out +from model import Detector, ImageQuery # , Label # temporarily commented out logger = logging.getLogger(__name__) class Label(str, Enum): - """Temporarily added here while we chase the bug """ + """Temporarily added here while we chase the bug""" + YES = "YES" NO = "NO" UNCLEAR = "UNCLEAR" diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py index c5325199..181aa5cd 100644 --- a/src/groundlight/internalapi.py +++ b/src/groundlight/internalapi.py @@ -12,7 +12,7 @@ import requests from groundlight_openapi_client.api_client import ApiClient, ApiException -from model import Detector, ImageQuery # , Source # temporarily commented out +from model import Detector, ImageQuery # , Source # temporarily commented out from groundlight.status_codes import is_ok from groundlight.version import get_version From ea5bb08922e104d2f0fb8f4d6db90a7f2f89982d Mon Sep 17 00:00:00 2001 From: paulina-positronix Date: Wed, 2 Apr 2025 22:01:20 +0000 Subject: [PATCH 06/13] Organize imports --- src/groundlight/binary_labels.py | 3 ++- src/groundlight/internalapi.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py index cfa804f8..42984509 100644 --- a/src/groundlight/binary_labels.py +++ b/src/groundlight/binary_labels.py @@ -8,7 +8,8 @@ from enum import Enum from typing import Union -from model import Detector, ImageQuery #, Label # temporarily commented out +# from model import Detector, ImageQuery, Label # temporarily commented out +from model import Detector, ImageQuery logger = logging.getLogger(__name__) diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py index c5325199..ef2281b0 100644 --- a/src/groundlight/internalapi.py +++ b/src/groundlight/internalapi.py @@ -12,7 +12,9 @@ import requests from groundlight_openapi_client.api_client import ApiClient, ApiException -from model import Detector, ImageQuery # , Source # temporarily commented out + +# from model import Detector, ImageQuery, Source # temporarily commented out +from model import Detector, ImageQuery from groundlight.status_codes import is_ok from groundlight.version import get_version From 23afab2b52ede97dd2d9b65e44966dec72bda1f8 Mon Sep 17 00:00:00 2001 From: brandon Date: Mon, 7 Apr 2025 10:38:36 -0700 Subject: [PATCH 07/13] bugfix related to removing enum validation --- src/groundlight/experimental_api.py | 9 +++------ test/unit/test_experimental.py | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 73511dcc..eff720d6 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -30,7 +30,6 @@ from groundlight_openapi_client.model.payload_template_request import 
From 23afab2b52ede97dd2d9b65e44966dec72bda1f8 Mon Sep 17 00:00:00 2001
From: brandon
Date: Mon, 7 Apr 2025 10:38:36 -0700
Subject: [PATCH 07/13] bugfix related to removing enum validation

---
 src/groundlight/experimental_api.py | 9 +++------
 test/unit/test_experimental.py      | 4 ++--
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py
index 73511dcc..eff720d6 100644
--- a/src/groundlight/experimental_api.py
+++ b/src/groundlight/experimental_api.py
@@ -30,7 +30,6 @@
 from groundlight_openapi_client.model.payload_template_request import PayloadTemplateRequest
 from groundlight_openapi_client.model.rule_request import RuleRequest
 from groundlight_openapi_client.model.status_enum import StatusEnum
-from groundlight_openapi_client.model.verb_enum import VerbEnum
 from groundlight_openapi_client.model.webhook_action_request import WebhookActionRequest
 from model import (
     ROI,
@@ -302,7 +301,7 @@ def create_alert(  # pylint: disable=too-many-locals, too-many-arguments  # noqa
             name=name,
             enabled=enabled,
             action=actions,
-            condition=ConditionRequest(verb=VerbEnum(condition.verb), parameters=condition.parameters),
+            condition=ConditionRequest(verb=condition.verb, parameters=condition.parameters),
             snooze_time_enabled=snooze_time_enabled,
             snooze_time_value=snooze_time_value,
             snooze_time_unit=snooze_time_unit,
@@ -318,7 +317,7 @@ def create_rule(  # pylint: disable=too-many-locals  # noqa: PLR0913
         channel: Union[str, ChannelEnum],
         recipient: str,
         *,
-        alert_on: Union[str, VerbEnum] = "CHANGED_TO",
+        alert_on: str = "CHANGED_TO",
         enabled: bool = True,
         include_image: bool = False,
         condition_parameters: Union[str, dict, None] = None,
@@ -393,8 +392,6 @@ def create_rule(  # pylint: disable=too-many-locals  # noqa: PLR0913
 
         if condition_parameters is None:
             condition_parameters = {}
-        if isinstance(alert_on, str):
-            alert_on = VerbEnum(alert_on.upper())
         if isinstance(channel, str):
             channel = ChannelEnum(channel.upper())
         if isinstance(condition_parameters, str):
@@ -839,7 +836,7 @@ def update_detector_escalation_type(self, detector: Union[str, Detector], escala
             raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'")
         self.detectors_api.update_detector(
             detector,
-            patched_detector_request=PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type)),
+            patched_detector_request=PatchedDetectorRequest(escalation_type=escalation_type),
         )
 
diff --git a/test/unit/test_experimental.py b/test/unit/test_experimental.py
index 2dacdccc..0b597649 100644
--- a/test/unit/test_experimental.py
+++ b/test/unit/test_experimental.py
@@ -60,10 +60,10 @@ def test_update_detector_escalation_type(gl_experimental: ExperimentalApi):
     detector = gl_experimental.get_or_create_detector(f"test {datetime.utcnow()}", "Is there a dog?")
     gl_experimental.update_detector_escalation_type(detector.id, "NO_HUMAN_LABELING")
     updated_detector = gl_experimental.get_detector(detector.id)
-    updated_detector.escalation_type.value == "NO_HUMAN_LABELING"
+    updated_detector.escalation_type == "NO_HUMAN_LABELING"
     gl_experimental.update_detector_escalation_type(detector.id, "STANDARD")
     updated_detector = gl_experimental.get_detector(detector.id)
-    updated_detector.escalation_type.value == "STANDARD"
+    updated_detector.escalation_type == "STANDARD"
 
 
 @pytest.mark.skip(
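
One side effect worth flagging in PATCH 07: dropping VerbEnum(alert_on.upper())
removes not just the enum wrapper but also the client-side uppercasing and the early
ValueError for unknown verbs, so a lowercase alert_on now reaches the server
verbatim. If that normalization is still wanted, a hypothetical caller-side guard
(verb names copied from the VerbEnum schema added to the spec above, helper name
ours) might look like:

    ALLOWED_VERBS = {
        "ANSWERED_CONSECUTIVELY",
        "ANSWERED_WITHIN_TIME",
        "CHANGED_TO",
        "NO_CHANGE",
        "NO_QUERIES",
    }

    def normalize_verb(alert_on: str) -> str:
        """Uppercase and validate an alert verb before it is sent to the API."""
        verb = alert_on.upper()
        if verb not in ALLOWED_VERBS:
            raise ValueError(f"unknown alert verb: {alert_on!r}")
        return verb
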
From 88c4e304d8383c8c8f9bc2b654105c9961900188 Mon Sep 17 00:00:00 2001
From: Auto-format Bot
Date: Mon, 7 Apr 2025 17:40:03 +0000
Subject: [PATCH 08/13] Automatically reformatting code

---
 src/groundlight/experimental_api.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py
index eff720d6..98c3754e 100644
--- a/src/groundlight/experimental_api.py
+++ b/src/groundlight/experimental_api.py
@@ -24,7 +24,6 @@
 from groundlight_openapi_client.model.condition_request import ConditionRequest
 from groundlight_openapi_client.model.count_mode_configuration import CountModeConfiguration
 from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest
-from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum
 from groundlight_openapi_client.model.multi_class_mode_configuration import MultiClassModeConfiguration
 from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest
 from groundlight_openapi_client.model.payload_template_request import PayloadTemplateRequest

From 215b50096fe1bf9971d8654e753bf12c9e42b820 Mon Sep 17 00:00:00 2001
From: paulina-positronix
Date: Mon, 7 Apr 2025 19:46:53 +0000
Subject: [PATCH 09/13] Revert "Temporary changes to remove use of missing Label, Source to unblock while we chase the error"

This reverts commit 42f29f6c5d5618cc55c9602757f20e783d19a1c4.
---
 src/groundlight/binary_labels.py | 11 +----------
 src/groundlight/internalapi.py   | 6 ++----
 2 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py
index 42098f19..557a4245 100644
--- a/src/groundlight/binary_labels.py
+++ b/src/groundlight/binary_labels.py
@@ -8,20 +8,11 @@
 from enum import Enum
 from typing import Union
 
-# from model import Detector, ImageQuery, Label # temporarily commented out
-from model import Detector, ImageQuery
+from model import Detector, ImageQuery, Label
 
 logger = logging.getLogger(__name__)
 
 
-class Label(str, Enum):
-    """Temporarily added here while we chase the bug"""
-
-    YES = "YES"
-    NO = "NO"
-    UNCLEAR = "UNCLEAR"
-
-
 VALID_DISPLAY_LABELS = {Label.YES, Label.NO, Label.UNCLEAR}
 
diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py
index ef2281b0..10e1791d 100644
--- a/src/groundlight/internalapi.py
+++ b/src/groundlight/internalapi.py
@@ -12,9 +12,7 @@
 import requests
 from groundlight_openapi_client.api_client import ApiClient, ApiException
-
-# from model import Detector, ImageQuery, Source # temporarily commented out
-from model import Detector, ImageQuery
+from model import Detector, ImageQuery, Source
 
 from groundlight.status_codes import is_ok
 from groundlight.version import get_version
@@ -78,7 +76,7 @@ def iq_is_answered(iq: ImageQuery) -> bool:
     """
     if not iq.result or not iq.result.source:
         return False
-    if (iq.result.source == "STILL_PROCESSING") or (iq.result.source is None):  # Should never be None
+    if (iq.result.source == Source.STILL_PROCESSING) or (iq.result.source is None):  # Should never be None
         return False
     return True

From f782f058c58503af8acd6f6263e6ac26a1f7d1b9 Mon Sep 17 00:00:00 2001
From: brandon
Date: Mon, 7 Apr 2025 13:19:50 -0700
Subject: [PATCH 10/13] Revert "Revert "Temporary changes to remove use of missing Label, Source to unblock while we chase the error""

This reverts commit 215b50096fe1bf9971d8654e753bf12c9e42b820.
---
 src/groundlight/binary_labels.py | 11 ++++++++++-
 src/groundlight/internalapi.py   | 6 ++++--
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py
index 557a4245..42098f19 100644
--- a/src/groundlight/binary_labels.py
+++ b/src/groundlight/binary_labels.py
@@ -8,11 +8,20 @@
 from enum import Enum
 from typing import Union
 
-from model import Detector, ImageQuery, Label
+# from model import Detector, ImageQuery, Label # temporarily commented out
+from model import Detector, ImageQuery
 
 logger = logging.getLogger(__name__)
 
 
+class Label(str, Enum):
+    """Temporarily added here while we chase the bug"""
+
+    YES = "YES"
+    NO = "NO"
+    UNCLEAR = "UNCLEAR"
+
+
 VALID_DISPLAY_LABELS = {Label.YES, Label.NO, Label.UNCLEAR}
 
diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py
index 10e1791d..ef2281b0 100644
--- a/src/groundlight/internalapi.py
+++ b/src/groundlight/internalapi.py
@@ -12,7 +12,9 @@
 import requests
 from groundlight_openapi_client.api_client import ApiClient, ApiException
-from model import Detector, ImageQuery, Source
+
+# from model import Detector, ImageQuery, Source # temporarily commented out
+from model import Detector, ImageQuery
 
 from groundlight.status_codes import is_ok
 from groundlight.version import get_version
@@ -76,7 +78,7 @@ def iq_is_answered(iq: ImageQuery) -> bool:
     """
     if not iq.result or not iq.result.source:
         return False
-    if (iq.result.source == Source.STILL_PROCESSING) or (iq.result.source is None):  # Should never be None
+    if (iq.result.source == "STILL_PROCESSING") or (iq.result.source is None):  # Should never be None
         return False
     return True

From 25f6b9ed1be4e3bd67b39268f55fe97f273aa244 Mon Sep 17 00:00:00 2001
From: brandon
Date: Mon, 7 Apr 2025 13:21:19 -0700
Subject: [PATCH 11/13] remove temp comments

---
 src/groundlight/binary_labels.py | 1 -
 src/groundlight/internalapi.py   | 1 -
 2 files changed, 2 deletions(-)

diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py
index 42098f19..6e589fe9 100644
--- a/src/groundlight/binary_labels.py
+++ b/src/groundlight/binary_labels.py
@@ -8,7 +8,6 @@
 from enum import Enum
 from typing import Union
 
-# from model import Detector, ImageQuery, Label # temporarily commented out
 from model import Detector, ImageQuery
 
 logger = logging.getLogger(__name__)
 
diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py
index ef2281b0..c1cac676 100644
--- a/src/groundlight/internalapi.py
+++ b/src/groundlight/internalapi.py
@@ -13,7 +13,6 @@
 import requests
 from groundlight_openapi_client.api_client import ApiClient, ApiException
 
-# from model import Detector, ImageQuery, Source # temporarily commented out
 from model import Detector, ImageQuery
 
 from groundlight.status_codes import is_ok
 from groundlight.version import get_version

From 30c97184df13ec8b3c6b7c03751ce85b315cd9aa Mon Sep 17 00:00:00 2001
From: Auto-format Bot
Date: Mon, 7 Apr 2025 20:22:15 +0000
Subject: [PATCH 12/13] Automatically reformatting code

---
 src/groundlight/internalapi.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py
index c1cac676..7c3c6849 100644
--- a/src/groundlight/internalapi.py
+++ b/src/groundlight/internalapi.py
@@ -12,7 +12,6 @@
 import requests
 from groundlight_openapi_client.api_client import ApiClient, ApiException
-
 from model import Detector, ImageQuery
 
 from groundlight.status_codes import is_ok
 from groundlight.version import get_version
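
The net effect of PATCHes 09 through 12 on iq_is_answered() is that the source check
stays a plain string comparison. A self-contained sketch of the patched logic, using
hypothetical stand-in classes rather than the generated models:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class FakeResult:  # stand-in for the generated result model
        source: Optional[str]

    @dataclass
    class FakeImageQuery:  # stand-in for model.ImageQuery
        result: Optional[FakeResult]

    def iq_is_answered(iq) -> bool:
        """Mirrors the patched check: unanswered while still processing."""
        if not iq.result or not iq.result.source:
            return False  # a None source falls into this falsy guard
        return iq.result.source != "STILL_PROCESSING"

    assert not iq_is_answered(FakeImageQuery(result=None))
    assert not iq_is_answered(FakeImageQuery(result=FakeResult(source="STILL_PROCESSING")))
    assert iq_is_answered(FakeImageQuery(result=FakeResult(source="CLOUD")))
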
From fe5f86ad79a38f23efaa91e06f6c3e05a4d91e78 Mon Sep 17 00:00:00 2001
From: Paulina Varshavskaya <88207457+paulina-positronix@users.noreply.github.com>
Date: Mon, 7 Apr 2025 16:14:49 -0700
Subject: [PATCH 13/13] Remove temporary comment

---
 src/groundlight/binary_labels.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/groundlight/binary_labels.py b/src/groundlight/binary_labels.py
index 6e589fe9..bf096084 100644
--- a/src/groundlight/binary_labels.py
+++ b/src/groundlight/binary_labels.py
@@ -14,8 +14,6 @@
 class Label(str, Enum):
-    """Temporarily added here while we chase the bug"""
-
     YES = "YES"
     NO = "NO"
     UNCLEAR = "UNCLEAR"
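
With the full series applied, enum-typed detector fields surface as plain strings,
matching the updated test in PATCH 07. A rough usage sketch, assuming a configured
GROUNDLIGHT_API_TOKEN and that ExperimentalApi is importable from the top-level
package; the detector name here is illustrative:

    from groundlight import ExperimentalApi

    gl = ExperimentalApi()
    detector = gl.get_or_create_detector("dog-detector", "Is there a dog?")
    gl.update_detector_escalation_type(detector.id, "NO_HUMAN_LABELING")
    updated = gl.get_detector(detector.id)
    assert updated.escalation_type == "NO_HUMAN_LABELING"  # plain str, no .value needed
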