update openapi specs for different targets of balancing strategy (#1533)
* update openapi specs for different targets of balancing strategy

* regenerated with new endpoint

* regenerated with new endpoint

* set isDefault

---------

Co-authored-by: Jeremy A. Prescott <[email protected]>
ersi-lightly and japrescott authored May 6, 2024
1 parent 80fa5b5 commit 919168b
Showing 18 changed files with 406 additions and 61 deletions.
1 change: 1 addition & 0 deletions lightly/openapi_generated/swagger_client/__init__.py
@@ -258,6 +258,7 @@
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry import SelectionConfigV4Entry
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_input import SelectionConfigV4EntryInput
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_strategy import SelectionConfigV4EntryStrategy
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_strategy_all_of import SelectionConfigV4EntryStrategyAllOf
from lightly.openapi_generated.swagger_client.models.selection_input_predictions_name import SelectionInputPredictionsName
from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType
from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation
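The only change here is the new package-level export of SelectionConfigV4EntryStrategyAllOf. openapi-generator emits *AllOf models when a spec schema uses allOf composition, so the regenerated strategy model layers the balancing-specific fields this PR is about on top of the shared strategy base. As a minimal sketch of what a balancing entry looks like, following Lightly's documented selection-config format — the task name and class ratios below are illustrative, not taken from this commit:

# Sketch of a selection config entry using the BALANCE strategy, the model
# family this commit regenerates. Values are placeholders for illustration.
balancing_entry = {
    "input": {
        "type": "PREDICTIONS",
        "task": "object-detection",   # hypothetical prediction task name
        "name": "CLASS_DISTRIBUTION",
    },
    "strategy": {
        "type": "BALANCE",
        "target": {"car": 0.4, "pedestrian": 0.6},  # desired class ratios
    },
}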
88 changes: 66 additions & 22 deletions lightly/openapi_generated/swagger_client/api/datasets_api.py

Large diffs are not rendered by default.

183 changes: 170 additions & 13 deletions lightly/openapi_generated/swagger_client/api/docker_api.py

Large diffs are not rendered by default.

45 changes: 39 additions & 6 deletions lightly/openapi_generated/swagger_client/api/predictions_api.py
@@ -20,7 +20,7 @@
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated

from pydantic import Field, conint, conlist, constr, validator
from pydantic import Field, StrictBool, conint, conlist, constr, validator

from typing import List, Optional

@@ -711,20 +711,26 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id :
_request_auth=_params.get('_request_auth'))

@validate_arguments
def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501
def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501
"""get_predictions_by_dataset_id # noqa: E501
Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
>>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, page_size, page_offset, lean, task_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
:type prediction_uuid_timestamp: int
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param lean: if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. e.g SEGMENTATION does not need to return the RLE
:type lean: bool
:param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
:type task_name: str
:param async_req: Whether to execute the request asynchronously.
@@ -741,23 +747,29 @@ def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=Tru
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_predictions_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501
return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, page_size, page_offset, lean, task_name, **kwargs) # noqa: E501

@validate_arguments
def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501
def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501
"""get_predictions_by_dataset_id # noqa: E501
Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
>>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, page_size, page_offset, lean, task_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
:type prediction_uuid_timestamp: int
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param lean: if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. e.g SEGMENTATION does not need to return the RLE
:type lean: bool
:param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
:type task_name: str
:param async_req: Whether to execute the request asynchronously.
@@ -790,6 +802,9 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co
_all_params = [
'dataset_id',
'prediction_uuid_timestamp',
'page_size',
'page_offset',
'lean',
'task_name'
]
_all_params.extend(
@@ -830,6 +845,24 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co
_params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
))

if _params.get('page_size') is not None: # noqa: E501
_query_params.append((
'pageSize',
_params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
))

if _params.get('page_offset') is not None: # noqa: E501
_query_params.append((
'pageOffset',
_params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
))

if _params.get('lean') is not None: # noqa: E501
_query_params.append((
'lean',
_params['lean'].value if hasattr(_params['lean'], 'value') else _params['lean']
))

if _params.get('task_name') is not None: # noqa: E501
_query_params.append((
'taskName',
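Taken together, the new pageSize, pageOffset, and lean query parameters let clients stream large prediction sets instead of fetching everything in one response. A minimal pagination sketch, assuming a PredictionsApi instance built from a configured ApiClient; the stop-on-first-empty-page loop is a convention assumed here, not something this diff specifies:

from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi

def iter_predictions(api: PredictionsApi, dataset_id: str, page_size: int = 500):
    # Yield prediction singletons page by page. lean=True skips the "heavy"
    # payload parts such as segmentation RLEs, per the new parameter docs.
    offset = 0
    while True:
        page = api.get_predictions_by_dataset_id(
            dataset_id=dataset_id,
            page_size=page_size,
            page_offset=offset,
            lean=True,
        )
        if not page:
            break
        yield from page
        offset += page_size

Note that page_size is constrained to >= 1 and page_offset to >= 0 by the generated pydantic annotations, so invalid pagination arguments fail client-side under @validate_arguments before any request is made.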
8 changes: 4 additions & 4 deletions lightly/openapi_generated/swagger_client/api/samples_api.py
@@ -1527,7 +1527,7 @@ def get_samples_partial_by_dataset_id_with_http_info(self, dataset_id : Annotate
_request_auth=_params.get('_request_auth'))

@validate_arguments
def update_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Optional[StrictBool] = None, **kwargs) -> None: # noqa: E501
def update_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Annotated[Optional[StrictBool], Field(description="Deprecated, is ignored! If we should also update the dataset with the new sample information (like total size)")] = None, **kwargs) -> None: # noqa: E501
"""update_sample_by_id # noqa: E501
update a specific sample of a dataset # noqa: E501
@@ -1543,7 +1543,7 @@ def update_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(
:type sample_id: str
:param sample_update_request: The updated sample to set (required)
:type sample_update_request: SampleUpdateRequest
:param enable_dataset_update:
:param enable_dataset_update: Deprecated, is ignored! If we should also update the dataset with the new sample information (like total size)
:type enable_dataset_update: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
@@ -1562,7 +1562,7 @@ def update_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(
return self.update_sample_by_id_with_http_info(dataset_id, sample_id, sample_update_request, enable_dataset_update, **kwargs) # noqa: E501

@validate_arguments
def update_sample_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Optional[StrictBool] = None, **kwargs) -> ApiResponse: # noqa: E501
def update_sample_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Annotated[Optional[StrictBool], Field(description="Deprecated, is ignored! If we should also update the dataset with the new sample information (like total size)")] = None, **kwargs) -> ApiResponse: # noqa: E501
"""update_sample_by_id # noqa: E501
update a specific sample of a dataset # noqa: E501
@@ -1578,7 +1578,7 @@ def update_sample_by_id_with_http_info(self, dataset_id : Annotated[constr(stric
:type sample_id: str
:param sample_update_request: The updated sample to set (required)
:type sample_update_request: SampleUpdateRequest
:param enable_dataset_update:
:param enable_dataset_update: Deprecated, is ignored! If we should also update the dataset with the new sample information (like total size)
:type enable_dataset_update: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
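The samples_api.py change is purely documentary: enable_dataset_update is now described as deprecated and ignored by the server. Callers of update_sample_by_id can therefore omit the argument entirely; the parameter stays in the signature (defaulting to None) only for backwards compatibility.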
@@ -225,6 +225,7 @@
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry import SelectionConfigV4Entry
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_input import SelectionConfigV4EntryInput
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_strategy import SelectionConfigV4EntryStrategy
from lightly.openapi_generated.swagger_client.models.selection_config_v4_entry_strategy_all_of import SelectionConfigV4EntryStrategyAllOf
from lightly.openapi_generated.swagger_client.models.selection_input_predictions_name import SelectionInputPredictionsName
from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType
from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation
@@ -30,7 +30,7 @@ class DatasourceConfigS3DelegatedAccess(DatasourceConfigBase):
"""
full_path: StrictStr = Field(..., alias="fullPath", description="path includes the bucket name and the path within the bucket where you have stored your information")
s3_region: S3Region = Field(..., alias="s3Region")
s3_external_id: constr(strict=True, min_length=10) = Field(..., alias="s3ExternalId", description="The external ID specified when creating the role.")
s3_external_id: constr(strict=True, min_length=10) = Field(..., alias="s3ExternalId", description="The external ID specified when creating the role. More information can be found here: - https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_externalid ")
s3_arn: constr(strict=True, min_length=12) = Field(..., alias="s3ARN", description="The ARN of the role you created")
s3_server_side_encryption_kms_key: Optional[constr(strict=True, min_length=1)] = Field(None, alias="s3ServerSideEncryptionKMSKey", description="If set, Lightly Worker will automatically set the headers to use server side encryption https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html with this value as the appropriate KMS key arn. This will encrypt the files created by Lightly (crops, frames, thumbnails) in the S3 bucket. ")
__properties = ["id", "purpose", "type", "thumbSuffix", "fullPath", "s3Region", "s3ExternalId", "s3ARN", "s3ServerSideEncryptionKMSKey"]
@@ -28,7 +28,7 @@ class DatasourceConfigS3DelegatedAccessAllOf(BaseModel):
DatasourceConfigS3DelegatedAccessAllOf
"""
s3_region: S3Region = Field(..., alias="s3Region")
s3_external_id: constr(strict=True, min_length=10) = Field(..., alias="s3ExternalId", description="The external ID specified when creating the role.")
s3_external_id: constr(strict=True, min_length=10) = Field(..., alias="s3ExternalId", description="The external ID specified when creating the role. More information can be found here: - https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_externalid ")
s3_arn: constr(strict=True, min_length=12) = Field(..., alias="s3ARN", description="The ARN of the role you created")
s3_server_side_encryption_kms_key: Optional[constr(strict=True, min_length=1)] = Field(None, alias="s3ServerSideEncryptionKMSKey", description="If set, Lightly Worker will automatically set the headers to use server side encryption https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html with this value as the appropriate KMS key arn. This will encrypt the files created by Lightly (crops, frames, thumbnails) in the S3 bucket. ")
__properties = ["s3Region", "s3ExternalId", "s3ARN", "s3ServerSideEncryptionKMSKey"]
@@ -26,7 +26,7 @@ class DelegatedAccessExternalIdsInner(BaseModel):
"""
DelegatedAccessExternalIdsInner
"""
external_id: constr(strict=True, min_length=10) = Field(..., alias="externalId", description="The external ID specified when creating the role.")
external_id: constr(strict=True, min_length=10) = Field(..., alias="externalId", description="The external ID specified when creating the role. More information can be found here: - https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_externalid ")
user_id: Optional[StrictStr] = Field(None, alias="userId")
team_id: Optional[StrictStr] = Field(None, alias="teamId")
__properties = ["externalId", "userId", "teamId"]
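All three regenerated models above gain the same expanded description for the delegated-access external ID, pointing at AWS's confused-deputy guidance. For context, the >= 10 character external ID feeds into the sts:ExternalId condition of the role's IAM trust policy; a sketch with placeholder account and ID values, expressed as a Python dict, not taken from this commit:

import json

trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            # Account allowed to assume the role -- placeholder value.
            "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
            "Action": "sts:AssumeRole",
            # Must match the externalId stored by Lightly -- placeholder value.
            "Condition": {"StringEquals": {"sts:ExternalId": "my-external-id-123"}},
        }
    ],
}
print(json.dumps(trust_policy, indent=2))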
