From 457aaaf0f94e512188eac02caad53ed120419b34 Mon Sep 17 00:00:00 2001
From: Brandon <132288221+brandon-groundlight@users.noreply.github.com>
Date: Tue, 8 Oct 2024 12:51:32 -0700
Subject: [PATCH] Makes source no longer required (#260)

This should in theory make v0.18 backwards compatible
---
 generated/README.md | 2 +-
 generated/docs/BinaryClassificationResult.md | 2 +-
 generated/docs/CountingResult.md | 2 +-
 generated/docs/LabelValue.md | 2 +-
 generated/docs/MultiClassificationResult.md | 2 +-
 .../groundlight_openapi_client/__init__.py | 2 +-
 .../api/actions_api.py | 2 +-
 .../api/detector_groups_api.py | 2 +-
 .../api/detector_reset_api.py | 2 +-
 .../api/detectors_api.py | 2 +-
 .../api/image_queries_api.py | 2 +-
 .../api/labels_api.py | 2 +-
 .../api/notes_api.py | 2 +-
 .../api/user_api.py | 2 +-
 .../groundlight_openapi_client/api_client.py | 2 +-
 .../configuration.py | 4 +--
 .../groundlight_openapi_client/exceptions.py | 2 +-
 .../model/action.py | 2 +-
 .../model/action_list.py | 2 +-
 .../model/all_notes.py | 2 +-
 .../model/annotations_requested_enum.py | 2 +-
 .../model/b_box_geometry.py | 2 +-
 .../model/b_box_geometry_request.py | 2 +-
 .../model/binary_classification_result.py | 26 ++++++++---------
 .../model/blank_enum.py | 2 +-
 .../model/channel_enum.py | 2 +-
 .../model/condition.py | 2 +-
 .../model/condition_request.py | 2 +-
 .../model/count_mode_configuration.py | 2 +-
 .../model/counting_result.py | 16 +++++------
 .../model/detector.py | 2 +-
 .../model/detector_creation_input_request.py | 2 +-
 .../model/detector_group.py | 2 +-
 .../model/detector_group_request.py | 2 +-
 .../model/detector_type_enum.py | 2 +-
 .../model/escalation_type_enum.py | 2 +-
 .../model/image_query.py | 2 +-
 .../model/image_query_type_enum.py | 2 +-
 .../model/inline_response200.py | 2 +-
 .../model/label_value.py | 28 +++++++++----------
 .../model/label_value_request.py | 2 +-
 .../model/mode_enum.py | 2 +-
 .../model/multi_class_mode_configuration.py | 2 +-
 .../model/multi_classification_result.py | 16 +++++------
 .../groundlight_openapi_client/model/note.py | 2 +-
 .../model/note_request.py | 2 +-
 .../model/paginated_detector_list.py | 2 +-
 .../model/paginated_image_query_list.py | 2 +-
 .../model/paginated_rule_list.py | 2 +-
 .../model/patched_detector_request.py | 2 +-
 .../model/result_type_enum.py | 2 +-
 .../groundlight_openapi_client/model/roi.py | 2 +-
 .../model/roi_request.py | 2 +-
 .../groundlight_openapi_client/model/rule.py | 2 +-
 .../model/rule_request.py | 2 +-
 .../model/snooze_time_unit_enum.py | 2 +-
 .../model/source_enum.py | 2 +-
 .../model/status_enum.py | 2 +-
 .../model/verb_enum.py | 2 +-
 .../groundlight_openapi_client/model_utils.py | 2 +-
 generated/groundlight_openapi_client/rest.py | 2 +-
 generated/model.py | 14 ++++++----
 generated/setup.py | 2 +-
 package-lock.json | 2 ++
 pyproject.toml | 2 +-
 spec/public-api.yaml | 9 +++---
 src/groundlight/client.py | 2 +-
 src/groundlight/experimental_api.py | 2 +-
 68 files changed, 117 insertions(+), 118 deletions(-)

diff --git a/generated/README.md b/generated/README.md
index 7dae07bd..22aa3842 100644
--- a/generated/README.md
+++ b/generated/README.md
@@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. 
You can easily create computer This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: -- API version: 0.15.3 +- API version: 0.18.1 - Package version: 1.0.0 - Build package: org.openapitools.codegen.languages.PythonClientCodegen diff --git a/generated/docs/BinaryClassificationResult.md b/generated/docs/BinaryClassificationResult.md index 2b0d5df5..655da3ab 100644 --- a/generated/docs/BinaryClassificationResult.md +++ b/generated/docs/BinaryClassificationResult.md @@ -4,9 +4,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**source** | **str** | | **label** | **str** | | **confidence** | **float** | | [optional] +**source** | **str** | Source is optional to support edge v0.2 | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/CountingResult.md b/generated/docs/CountingResult.md index 486b6067..dcf8d6da 100644 --- a/generated/docs/CountingResult.md +++ b/generated/docs/CountingResult.md @@ -4,9 +4,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**source** | **str** | | **count** | **int** | | **confidence** | **float** | | [optional] +**source** | **str** | Source is optional to support edge v0.2 | [optional] **greater_than_max** | **bool** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/generated/docs/LabelValue.md b/generated/docs/LabelValue.md index acbb0e6f..4869e48c 100644 --- a/generated/docs/LabelValue.md +++ b/generated/docs/LabelValue.md @@ -9,9 +9,9 @@ Name | Type | Description | Notes **annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly] **created_at** | **datetime** | | [readonly] **detector_id** | **int, none_type** | | [readonly] -**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] **text** | **str, none_type** | Text annotations | [readonly] **rois** | [**[ROI], none_type**](ROI.md) | | [optional] +**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] [readonly] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/MultiClassificationResult.md b/generated/docs/MultiClassificationResult.md index 477ff1f4..2ab36f78 100644 --- a/generated/docs/MultiClassificationResult.md +++ b/generated/docs/MultiClassificationResult.md @@ -4,9 +4,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**source** | **str** | | **label** | **str** | | **confidence** | **float** | | [optional] +**source** | **str** | Source is optional to support edge v0.2 | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | 
any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/groundlight_openapi_client/__init__.py b/generated/groundlight_openapi_client/__init__.py index c1149a99..3f67f120 100644 --- a/generated/groundlight_openapi_client/__init__.py +++ b/generated/groundlight_openapi_client/__init__.py @@ -5,7 +5,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/actions_api.py b/generated/groundlight_openapi_client/api/actions_api.py index ee5c7086..0670bad0 100644 --- a/generated/groundlight_openapi_client/api/actions_api.py +++ b/generated/groundlight_openapi_client/api/actions_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_groups_api.py b/generated/groundlight_openapi_client/api/detector_groups_api.py index 7479978b..89e739ff 100644 --- a/generated/groundlight_openapi_client/api/detector_groups_api.py +++ b/generated/groundlight_openapi_client/api/detector_groups_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_reset_api.py b/generated/groundlight_openapi_client/api/detector_reset_api.py index 6940a302..7662bbf8 100644 --- a/generated/groundlight_openapi_client/api/detector_reset_api.py +++ b/generated/groundlight_openapi_client/api/detector_reset_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index 9c197f73..e054c2c5 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/image_queries_api.py b/generated/groundlight_openapi_client/api/image_queries_api.py index eaef9be3..ebf133a9 100644 --- a/generated/groundlight_openapi_client/api/image_queries_api.py +++ b/generated/groundlight_openapi_client/api/image_queries_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/labels_api.py b/generated/groundlight_openapi_client/api/labels_api.py index 80593cf2..884151e4 100644 --- a/generated/groundlight_openapi_client/api/labels_api.py +++ b/generated/groundlight_openapi_client/api/labels_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index 00b937e7..6836fb2e 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/user_api.py b/generated/groundlight_openapi_client/api/user_api.py index 7bfbccfe..66937738 100644 --- a/generated/groundlight_openapi_client/api/user_api.py +++ b/generated/groundlight_openapi_client/api/user_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api_client.py b/generated/groundlight_openapi_client/api_client.py index 6fa02959..ac14fb86 100644 --- a/generated/groundlight_openapi_client/api_client.py +++ b/generated/groundlight_openapi_client/api_client.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/configuration.py b/generated/groundlight_openapi_client/configuration.py index e47e2dd0..9aab02ac 100644 --- a/generated/groundlight_openapi_client/configuration.py +++ b/generated/groundlight_openapi_client/configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -419,7 +419,7 @@ def to_debug_report(self): "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" - "Version of the API: 0.15.3\n" + "Version of the API: 0.18.1\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) ) diff --git a/generated/groundlight_openapi_client/exceptions.py b/generated/groundlight_openapi_client/exceptions.py index 41b32990..5ab58207 100644 --- a/generated/groundlight_openapi_client/exceptions.py +++ b/generated/groundlight_openapi_client/exceptions.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action.py b/generated/groundlight_openapi_client/model/action.py index 5f98f682..1199e0d8 100644 --- a/generated/groundlight_openapi_client/model/action.py +++ b/generated/groundlight_openapi_client/model/action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action_list.py b/generated/groundlight_openapi_client/model/action_list.py index 0fa31ca8..16954eef 100644 --- a/generated/groundlight_openapi_client/model/action_list.py +++ b/generated/groundlight_openapi_client/model/action_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/all_notes.py b/generated/groundlight_openapi_client/model/all_notes.py index 5b68adc8..aa96ed47 100644 --- a/generated/groundlight_openapi_client/model/all_notes.py +++ b/generated/groundlight_openapi_client/model/all_notes.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/annotations_requested_enum.py b/generated/groundlight_openapi_client/model/annotations_requested_enum.py index dc037938..5a4789bd 100644 --- a/generated/groundlight_openapi_client/model/annotations_requested_enum.py +++ b/generated/groundlight_openapi_client/model/annotations_requested_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry.py b/generated/groundlight_openapi_client/model/b_box_geometry.py index b1c352d9..1bfa3d7a 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry_request.py b/generated/groundlight_openapi_client/model/b_box_geometry_request.py index 6e756bd6..f23ce8d2 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry_request.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/binary_classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py index 4cbc8eef..3abe590e 100644 --- a/generated/groundlight_openapi_client/model/binary_classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -54,6 +54,11 @@ class BinaryClassificationResult(ModelNormal): """ allowed_values = { + ("label",): { + "YES": "YES", + "NO": "NO", + "UNCLEAR": "UNCLEAR", + }, ("source",): { "STILL_PROCESSING": "STILL_PROCESSING", "CLOUD": "CLOUD", @@ -61,11 +66,6 @@ class BinaryClassificationResult(ModelNormal): "CLOUD_ENSEMBLE": "CLOUD_ENSEMBLE", "ALGORITHM": "ALGORITHM", }, - ("label",): { - "YES": "YES", - "NO": "NO", - "UNCLEAR": "UNCLEAR", - }, } validations = { @@ -106,9 +106,9 @@ def openapi_types(): and the value is attribute type. 
""" return { - "source": (str,), # noqa: E501 "label": (str,), # noqa: E501 "confidence": (float,), # noqa: E501 + "source": (str,), # noqa: E501 } @cached_property @@ -116,9 +116,9 @@ def discriminator(): return None attribute_map = { - "source": "source", # noqa: E501 "label": "label", # noqa: E501 "confidence": "confidence", # noqa: E501 + "source": "source", # noqa: E501 } read_only_vars = {} @@ -127,11 +127,10 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 """BinaryClassificationResult - a model defined in OpenAPI Args: - source (str): label (str): Keyword Args: @@ -166,6 +165,7 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -194,7 +194,6 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.label = label for var_name, var_value in kwargs.items(): if ( @@ -218,11 +217,10 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, source, label, *args, **kwargs): # noqa: E501 + def __init__(self, label, *args, **kwargs): # noqa: E501 """BinaryClassificationResult - a model defined in OpenAPI Args: - source (str): label (str): Keyword Args: @@ -257,6 +255,7 @@ def __init__(self, source, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -283,7 +282,6 @@ def __init__(self, source, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.label = label for var_name, var_value in kwargs.items(): if ( diff --git a/generated/groundlight_openapi_client/model/blank_enum.py b/generated/groundlight_openapi_client/model/blank_enum.py index 8f634d5b..d7a16227 100644 --- a/generated/groundlight_openapi_client/model/blank_enum.py +++ b/generated/groundlight_openapi_client/model/blank_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/channel_enum.py b/generated/groundlight_openapi_client/model/channel_enum.py index 30247f5b..6590b1cc 100644 --- a/generated/groundlight_openapi_client/model/channel_enum.py +++ b/generated/groundlight_openapi_client/model/channel_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition.py b/generated/groundlight_openapi_client/model/condition.py index abc74401..7d67f28d 100644 --- a/generated/groundlight_openapi_client/model/condition.py +++ b/generated/groundlight_openapi_client/model/condition.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition_request.py b/generated/groundlight_openapi_client/model/condition_request.py index a629fe99..9ce8cd01 100644 --- a/generated/groundlight_openapi_client/model/condition_request.py +++ b/generated/groundlight_openapi_client/model/condition_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/count_mode_configuration.py b/generated/groundlight_openapi_client/model/count_mode_configuration.py index ecaa4c40..e28e500e 100644 --- a/generated/groundlight_openapi_client/model/count_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/count_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/counting_result.py b/generated/groundlight_openapi_client/model/counting_result.py index 4eda9b28..5c8daaa5 100644 --- a/generated/groundlight_openapi_client/model/counting_result.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -101,9 +101,9 @@ def openapi_types(): and the value is attribute type. 
""" return { - "source": (str,), # noqa: E501 "count": (int,), # noqa: E501 "confidence": (float,), # noqa: E501 + "source": (str,), # noqa: E501 "greater_than_max": (bool,), # noqa: E501 } @@ -112,9 +112,9 @@ def discriminator(): return None attribute_map = { - "source": "source", # noqa: E501 "count": "count", # noqa: E501 "confidence": "confidence", # noqa: E501 + "source": "source", # noqa: E501 "greater_than_max": "greater_than_max", # noqa: E501 } @@ -124,11 +124,10 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, source, count, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, count, *args, **kwargs): # noqa: E501 """CountingResult - a model defined in OpenAPI Args: - source (str): count (int): Keyword Args: @@ -163,6 +162,7 @@ def _from_openapi_data(cls, source, count, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ @@ -192,7 +192,6 @@ def _from_openapi_data(cls, source, count, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.count = count for var_name, var_value in kwargs.items(): if ( @@ -216,11 +215,10 @@ def _from_openapi_data(cls, source, count, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, source, count, *args, **kwargs): # noqa: E501 + def __init__(self, count, *args, **kwargs): # noqa: E501 """CountingResult - a model defined in OpenAPI Args: - source (str): count (int): Keyword Args: @@ -255,6 +253,7 @@ def __init__(self, source, count, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ @@ -282,7 +281,6 @@ def __init__(self, source, count, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.count = count for var_name, var_value in kwargs.items(): if ( diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index 23761a8d..b7095d8a 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_creation_input_request.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py index 358ed0b1..db38f022 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input_request.py +++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. 
You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group.py b/generated/groundlight_openapi_client/model/detector_group.py index bed76807..60350686 100644 --- a/generated/groundlight_openapi_client/model/detector_group.py +++ b/generated/groundlight_openapi_client/model/detector_group.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group_request.py b/generated/groundlight_openapi_client/model/detector_group_request.py index 0730767f..cb98b8fe 100644 --- a/generated/groundlight_openapi_client/model/detector_group_request.py +++ b/generated/groundlight_openapi_client/model/detector_group_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_type_enum.py b/generated/groundlight_openapi_client/model/detector_type_enum.py index e283920d..c20c0f96 100644 --- a/generated/groundlight_openapi_client/model/detector_type_enum.py +++ b/generated/groundlight_openapi_client/model/detector_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/escalation_type_enum.py b/generated/groundlight_openapi_client/model/escalation_type_enum.py index 291e25a5..99c297d5 100644 --- a/generated/groundlight_openapi_client/model/escalation_type_enum.py +++ b/generated/groundlight_openapi_client/model/escalation_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index 497037f0..0d844e01 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query_type_enum.py b/generated/groundlight_openapi_client/model/image_query_type_enum.py index 8460fb1e..5c24a7f9 100644 --- a/generated/groundlight_openapi_client/model/image_query_type_enum.py +++ b/generated/groundlight_openapi_client/model/image_query_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200.py b/generated/groundlight_openapi_client/model/inline_response200.py index 746eace5..78d48bf3 100644 --- a/generated/groundlight_openapi_client/model/inline_response200.py +++ b/generated/groundlight_openapi_client/model/inline_response200.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/label_value.py b/generated/groundlight_openapi_client/model/label_value.py index 1aaf7381..9bce25b9 100644 --- a/generated/groundlight_openapi_client/model/label_value.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -114,6 +114,14 @@ def openapi_types(): int, none_type, ), # noqa: E501 + "text": ( + str, + none_type, + ), # noqa: E501 + "rois": ( + [ROI], + none_type, + ), # noqa: E501 "source": ( bool, date, @@ -125,14 +133,6 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "text": ( - str, - none_type, - ), # noqa: E501 - "rois": ( - [ROI], - none_type, - ), # noqa: E501 } @cached_property @@ -145,9 +145,9 @@ def discriminator(): "annotations_requested": "annotations_requested", # noqa: E501 "created_at": "created_at", # noqa: E501 "detector_id": "detector_id", # noqa: E501 - "source": "source", # noqa: E501 "text": "text", # noqa: E501 "rois": "rois", # noqa: E501 + "source": "source", # noqa: E501 } read_only_vars = { @@ -156,8 +156,8 @@ def discriminator(): "annotations_requested", # noqa: E501 "created_at", # noqa: E501 "detector_id", # noqa: E501 - "source", # noqa: E501 "text", # noqa: E501 + "source", # noqa: E501 } _composed_schemas = {} @@ -165,7 +165,7 @@ def discriminator(): @classmethod @convert_js_args_to_python_args def _from_openapi_data( - cls, confidence, class_name, annotations_requested, created_at, detector_id, source, text, *args, **kwargs + cls, confidence, class_name, annotations_requested, created_at, detector_id, text, *args, **kwargs ): # noqa: E501 """LabelValue - a model defined in OpenAPI @@ -175,7 +175,6 @@ def _from_openapi_data( annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]): created_at (datetime): detector_id (int, none_type): - source (bool, date, datetime, dict, float, int, list, str, none_type): text (str, none_type): Text annotations Keyword Args: @@ -210,6 +209,7 @@ def _from_openapi_data( through its discriminator because we passed in _visited_composed_classes = (Animal,) rois ([ROI], none_type): [optional] # noqa: E501 + source (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -243,7 +243,6 @@ def _from_openapi_data( self.annotations_requested = annotations_requested self.created_at = created_at self.detector_id = detector_id - self.source = source self.text = text for var_name, var_value in kwargs.items(): if ( @@ -302,6 +301,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) rois ([ROI], none_type): [optional] # noqa: E501 + source (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py index 36ecde44..a388191d 100644 --- a/generated/groundlight_openapi_client/model/label_value_request.py +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py index 1ba41227..46bed042 100644 --- a/generated/groundlight_openapi_client/model/mode_enum.py +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py index 88c5906a..6fb5f80d 100644 --- a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_classification_result.py b/generated/groundlight_openapi_client/model/multi_classification_result.py index d18c07c8..9ee8a63f 100644 --- a/generated/groundlight_openapi_client/model/multi_classification_result.py +++ b/generated/groundlight_openapi_client/model/multi_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -101,9 +101,9 @@ def openapi_types(): and the value is attribute type. """ return { - "source": (str,), # noqa: E501 "label": (str,), # noqa: E501 "confidence": (float,), # noqa: E501 + "source": (str,), # noqa: E501 } @cached_property @@ -111,9 +111,9 @@ def discriminator(): return None attribute_map = { - "source": "source", # noqa: E501 "label": "label", # noqa: E501 "confidence": "confidence", # noqa: E501 + "source": "source", # noqa: E501 } read_only_vars = {} @@ -122,11 +122,10 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 """MultiClassificationResult - a model defined in OpenAPI Args: - source (str): label (str): Keyword Args: @@ -161,6 +160,7 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -189,7 +189,6 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.label = label for var_name, var_value in kwargs.items(): if ( @@ -213,11 +212,10 @@ def _from_openapi_data(cls, source, label, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, source, label, *args, **kwargs): # noqa: E501 + def __init__(self, label, *args, **kwargs): # noqa: E501 """MultiClassificationResult - a model defined in OpenAPI Args: - source (str): label (str): Keyword Args: @@ -252,6 +250,7 @@ def __init__(self, source, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 + source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -278,7 +277,6 @@ def __init__(self, source, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.source = source self.label = label for var_name, var_value in kwargs.items(): if ( diff --git a/generated/groundlight_openapi_client/model/note.py b/generated/groundlight_openapi_client/model/note.py index 01fee17b..8799080f 100644 --- a/generated/groundlight_openapi_client/model/note.py +++ b/generated/groundlight_openapi_client/model/note.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/note_request.py b/generated/groundlight_openapi_client/model/note_request.py index 545cb56c..32d6b05a 100644 --- a/generated/groundlight_openapi_client/model/note_request.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_detector_list.py b/generated/groundlight_openapi_client/model/paginated_detector_list.py index 0a93c835..51471f0e 100644 --- a/generated/groundlight_openapi_client/model/paginated_detector_list.py +++ b/generated/groundlight_openapi_client/model/paginated_detector_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_image_query_list.py b/generated/groundlight_openapi_client/model/paginated_image_query_list.py index 2436d033..39a70a0e 100644 --- a/generated/groundlight_openapi_client/model/paginated_image_query_list.py +++ b/generated/groundlight_openapi_client/model/paginated_image_query_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_rule_list.py b/generated/groundlight_openapi_client/model/paginated_rule_list.py index 72a26a98..bbd1b9bb 100644 --- a/generated/groundlight_openapi_client/model/paginated_rule_list.py +++ b/generated/groundlight_openapi_client/model/paginated_rule_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index 3572396d..a2fcd579 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/result_type_enum.py b/generated/groundlight_openapi_client/model/result_type_enum.py index 8441a6c6..3220f6a0 100644 --- a/generated/groundlight_openapi_client/model/result_type_enum.py +++ b/generated/groundlight_openapi_client/model/result_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi.py b/generated/groundlight_openapi_client/model/roi.py index 65db005f..07870cc5 100644 --- a/generated/groundlight_openapi_client/model/roi.py +++ b/generated/groundlight_openapi_client/model/roi.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi_request.py b/generated/groundlight_openapi_client/model/roi_request.py index 07dbb5cc..a1f63aa8 100644 --- a/generated/groundlight_openapi_client/model/roi_request.py +++ b/generated/groundlight_openapi_client/model/roi_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule.py b/generated/groundlight_openapi_client/model/rule.py index 1b5c6b33..5884c996 100644 --- a/generated/groundlight_openapi_client/model/rule.py +++ b/generated/groundlight_openapi_client/model/rule.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule_request.py b/generated/groundlight_openapi_client/model/rule_request.py index 34473003..62234b6f 100644 --- a/generated/groundlight_openapi_client/model/rule_request.py +++ b/generated/groundlight_openapi_client/model/rule_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py index 30428c63..555d4076 100644 --- a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py +++ b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py index cb24e36a..6248ac27 100644 --- a/generated/groundlight_openapi_client/model/source_enum.py +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/status_enum.py b/generated/groundlight_openapi_client/model/status_enum.py index fe8df1e7..ac77a141 100644 --- a/generated/groundlight_openapi_client/model/status_enum.py +++ b/generated/groundlight_openapi_client/model/status_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/verb_enum.py b/generated/groundlight_openapi_client/model/verb_enum.py index b2248ca5..84d601a3 100644 --- a/generated/groundlight_openapi_client/model/verb_enum.py +++ b/generated/groundlight_openapi_client/model/verb_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model_utils.py b/generated/groundlight_openapi_client/model_utils.py index d96edfcc..e5da59e1 100644 --- a/generated/groundlight_openapi_client/model_utils.py +++ b/generated/groundlight_openapi_client/model_utils.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/rest.py b/generated/groundlight_openapi_client/rest.py index 7ffe9b28..01876d57 100644 --- a/generated/groundlight_openapi_client/rest.py +++ b/generated/groundlight_openapi_client/rest.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/model.py b/generated/model.py index 452641a1..b8714371 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2024-10-05T01:53:23+00:00 +# timestamp: 2024-10-08T18:27:05+00:00 from __future__ import annotations @@ -178,6 +178,10 @@ class VerbEnum(str, Enum): class Source(str, Enum): + """ + Source is optional to support edge v0.2 + """ + STILL_PROCESSING = "STILL_PROCESSING" CLOUD = "CLOUD" USER = "USER" @@ -193,20 +197,20 @@ class Label(str, Enum): class BinaryClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Source + source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") label: Label class CountingResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Source + source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") count: int greater_than_max: Optional[bool] = None class MultiClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Source + source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") label: str @@ -358,7 +362,7 @@ class LabelValue(BaseModel): annotations_requested: List[AnnotationsRequestedEnum] created_at: datetime detector_id: Optional[int] = Field(...) - source: SourceEnum + source: Optional[SourceEnum] = None text: Optional[str] = Field(..., description="Text annotations") diff --git a/generated/setup.py b/generated/setup.py index 3e2017e3..9c4c9456 100644 --- a/generated/setup.py +++ b/generated/setup.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.15.3 + The version of the OpenAPI document: 0.18.1 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/package-lock.json b/package-lock.json index 016a11ee..c8c16bf4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1677,6 +1677,7 @@ "version": "7.0.1", "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0", "@types/katex": "^0.16.0", @@ -1695,6 +1696,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz", "integrity": "sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==", + "license": "MIT", "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-math": "^3.0.0", diff --git a/pyproject.toml b/pyproject.toml index 0bd4690d..9b398174 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ packages = [ {include = "**/*.py", from = "src"}, ] readme = "README.md" -version = "0.18.0" +version = "0.18.1" [tool.poetry.dependencies] # For certifi, use ">=" instead of "^" since it upgrades its "major version" every year, not really following semver diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 87fb8739..f3af8e4b 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: Groundlight API - version: 0.15.3 + version: 0.18.1 description: Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. @@ -995,7 +995,6 @@ components: - confidence - created_at - detector_id - - source - text LabelValueRequest: type: object @@ -1331,6 +1330,7 @@ components: minimum: 0.0 maximum: 1.0 source: + description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING @@ -1345,7 +1345,6 @@ components: - 'NO' - UNCLEAR required: - - source - label CountingResult: type: object @@ -1356,6 +1355,7 @@ components: minimum: 0.0 maximum: 1.0 source: + description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING @@ -1370,7 +1370,6 @@ components: greater_than_max: type: boolean required: - - source - count MultiClassificationResult: type: object @@ -1381,6 +1380,7 @@ components: minimum: 0.0 maximum: 1.0 source: + description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING @@ -1391,7 +1391,6 @@ components: label: type: string required: - - source - label CountModeConfiguration: type: object diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 17bb6834..55130782 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -258,7 +258,7 @@ def list_detectors(self, page: int = 1, page_size: int = 10) -> PaginatedDetecto ) return PaginatedDetectorList.parse_obj(obj.to_dict()) - def _prep_create_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals + def _prep_create_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals self, name: str, query: str, diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 5044a45f..8ca22d58 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -307,7 +307,7 @@ def reset_detector(self, detector: 
Union[str, Detector]) -> None: detector = detector.id self.detector_reset_api.reset_detector(detector) - def create_counting_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals + def create_counting_detector( # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals self, name: str, query: str,
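
The upshot of this patch for SDK users: "source" moves from required to optional on BinaryClassificationResult, CountingResult, MultiClassificationResult, and LabelValue, so answers produced by an edge endpoint running edge v0.2 (which omits "source") no longer fail client-side validation. A minimal sketch of the intended behavior, using the pydantic models from generated/model.py as defined in this diff; the import path and the sample values below are illustrative, not part of the patch:

    # Sketch only: assumes generated/model.py is importable as "generated.model";
    # adjust the import to match how the package is laid out in your environment.
    from generated.model import BinaryClassificationResult, Source

    # Edge v0.2 responses omit "source". Before this patch the field was
    # required, so parsing raised a validation error; now it defaults to None.
    edge_result = BinaryClassificationResult.parse_obj(
        {"label": "YES", "confidence": 0.87}
    )
    assert edge_result.source is None

    # Cloud responses that do include "source" parse exactly as before.
    cloud_result = BinaryClassificationResult.parse_obj(
        {"label": "NO", "confidence": 0.95, "source": "CLOUD"}
    )
    assert cloud_result.source is Source.CLOUD

Code that reads result.source should now treat None as "not reported" rather than assuming one of the Source enum values is always present.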