diff --git a/src/label_studio_sdk/label_interface/control_tags.py b/src/label_studio_sdk/label_interface/control_tags.py index a738cfd..45e0213 100644 --- a/src/label_studio_sdk/label_interface/control_tags.py +++ b/src/label_studio_sdk/label_interface/control_tags.py @@ -505,13 +505,6 @@ class SpanSelectionOffsets(SpanSelection): class ChoicesValue(BaseModel): choices: List[str] - # I don't know how Choices predictions with choice != 'multiple' was working without this... - @validator("choices", pre=True, always=True) - def coerce_to_list(cls, value: Union[str, List[str]]): - if isinstance(value, str): - return [value] - return value - class ChoicesTag(ControlTag): """ """ @@ -547,15 +540,6 @@ def to_json_schema(self): "description": f"Choices for {self.to_name[0]}" } - def _validate_labels(self, labels): - if super()._validate_labels(labels): - return True - - # HACK to continue to support single-item output in json schema - if not self.is_multiple_choice and isinstance(labels, str): - return super()._validate_labels([labels]) - - class LabelsValue(SpanSelection): labels: List[str] diff --git a/src/label_studio_sdk/label_interface/interface.py b/src/label_studio_sdk/label_interface/interface.py index 3ac81a5..a3f94d9 100644 --- a/src/label_studio_sdk/label_interface/interface.py +++ b/src/label_studio_sdk/label_interface/interface.py @@ -840,23 +840,64 @@ def generate_sample_task(self, mode="upload", secure_mode=False): return task - def generate_sample_prediction(self): - """ """ + def generate_sample_prediction(self) -> Optional[dict]: + """Generates a sample prediction that is valid for this label config. + + Example: + {'model_version': 'sample model version', + 'score': 0.0, + 'result': [{'id': 'e7bd76e6-4e88-4eb3-b433-55e03661bf5d', + 'from_name': 'sentiment', + 'to_name': 'text', + 'type': 'choices', + 'value': {'choices': ['Neutral']}}]} + + NOTE: `id` field in result is not required when importing predictions; it will be generated automatically.
+ """ prediction = PredictionValue( model_version='sample model version', result=[ - { - 'from_name': control.name, - 'to_name': control.to_name[0], - 'type': control.tag, - # TODO: put special case for choices in generation instead of validation - 'value': {control._label_attr_name: JSF(control.to_json_schema()).generate()} - } for control in self.controls + control.label(JSF(control.to_json_schema()).generate()) + for control in self.controls ] ) prediction_dct = prediction.model_dump() - assert self.validate_prediction(prediction_dct), 'could not generate a sample prediction' - return prediction_dct + if self.validate_prediction(prediction_dct): + return prediction_dct + else: + logger.debug(f'Sample prediction {prediction_dct} failed validation for label config {self.config}') + return None + + def generate_sample_annotation(self) -> Optional[dict]: + """Generates a sample annotation that is valid for this label config. + + Example: + {'was_cancelled': False, + 'ground_truth': False, + 'lead_time': 0.0, + 'result_count': 0, + 'completed_by': -1, + 'result': [{'id': 'b05da11d-3ffc-4657-8b8d-f5bc37cd59ac', + 'from_name': 'sentiment', + 'to_name': 'text', + 'type': 'choices', + 'value': {'choices': ['Negative']}}]} + + NOTE: `id` field in result is not required when importing annotations; it will be generated automatically. + """ + annotation = AnnotationValue( + completed_by=-1, # annotator's user id + result=[ + control.label(JSF(control.to_json_schema()).generate()) + for control in self.controls + ] + ) + annotation_dct = annotation.model_dump() + if self.validate_annotation(annotation_dct): + return annotation_dct + else: + logger.debug(f'Sample annotation {annotation_dct} failed validation for label config {self.config}') + return None ##### ##### COMPATIBILITY LAYER