use control.label() instead of lower level fns
matt-bernstein committed Nov 26, 2024 (commit b155e8a, 1 parent: 8ea2c3d)
Showing 2 changed files with 52 additions and 27 deletions.
src/label_studio_sdk/label_interface/control_tags.py (0 additions & 16 deletions)

@@ -505,13 +505,6 @@ class SpanSelectionOffsets(SpanSelection):
 class ChoicesValue(BaseModel):
     choices: List[str]
 
-    # I don't know how Choices predictions with choice != 'multiple' was working without this...
-    @validator("choices", pre=True, always=True)
-    def coerce_to_list(cls, value: Union[str, List[str]]):
-        if isinstance(value, str):
-            return [value]
-        return value
-
 
 class ChoicesTag(ControlTag):
     """ """
@@ -547,15 +540,6 @@ def to_json_schema(self):
             "description": f"Choices for {self.to_name[0]}"
         }
 
-    def _validate_labels(self, labels):
-        if super()._validate_labels(labels):
-            return True
-
-        # HACK to continue to support single-item output in json schema
-        if not self.is_multiple_choice and isinstance(labels, str):
-            return super()._validate_labels([labels])
-
-
 
 class LabelsValue(SpanSelection):
     labels: List[str]
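For context, the deleted validator was what let ChoicesValue accept a bare string and coerce it into a one-element list, which is how predictions for Choices tags with choice != 'multiple' kept parsing. A minimal standalone illustration of that old behavior (a sketch using a pydantic v1-style validator to match the deleted code; not current SDK source):

from typing import List, Union
from pydantic import BaseModel, validator

class ChoicesValue(BaseModel):
    choices: List[str]

    # Old behavior (now deleted): a bare string like "Neutral" is coerced to ["Neutral"].
    @validator("choices", pre=True, always=True)
    def coerce_to_list(cls, value: Union[str, List[str]]):
        if isinstance(value, str):
            return [value]
        return value

assert ChoicesValue(choices="Neutral").choices == ["Neutral"]

With result items now built through control.label() (see interface.py below), values are assumed to arrive already list-shaped, so both this coercion and the single-string _validate_labels HACK could be dropped.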
src/label_studio_sdk/label_interface/interface.py (52 additions & 11 deletions)

@@ -840,23 +840,64 @@ def generate_sample_task(self, mode="upload", secure_mode=False):

         return task
 
-    def generate_sample_prediction(self):
-        """ """
+    def generate_sample_prediction(self) -> Optional[dict]:
+        """Generates a sample prediction that is valid for this label config.
+
+        Example:
+            {'model_version': 'sample model version',
+             'score': 0.0,
+             'result': [{'id': 'e7bd76e6-4e88-4eb3-b433-55e03661bf5d',
+                         'from_name': 'sentiment',
+                         'to_name': 'text',
+                         'type': 'choices',
+                         'value': {'choices': ['Neutral']}}]}
+
+        NOTE: `id` field in result is not required when importing predictions; it will be generated automatically.
+        """
         prediction = PredictionValue(
             model_version='sample model version',
             result=[
-                {
-                    'from_name': control.name,
-                    'to_name': control.to_name[0],
-                    'type': control.tag,
-                    # TODO: put special case for choices in generation instead of validation
-                    'value': {control._label_attr_name: JSF(control.to_json_schema()).generate()}
-                } for control in self.controls
+                control.label(JSF(control.to_json_schema()).generate())
+                for control in self.controls
             ]
         )
         prediction_dct = prediction.model_dump()
-        assert self.validate_prediction(prediction_dct), 'could not generate a sample prediction'
-        return prediction_dct
+        if self.validate_prediction(prediction_dct):
+            return prediction_dct
+        else:
+            logger.debug(f'Sample prediction {prediction_dct} failed validation for label config {self.config}')
+            return None
+
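A note on the generation step above: JSF here appears to be the jsf package (a JSON Schema faker), so JSF(control.to_json_schema()).generate() produces a random value conforming to the control's schema, and control.label() wraps it into a result item. A minimal sketch under that assumption; the schema shape is illustrative, not taken from this diff:

from jsf import JSF

# Illustrative schema of the kind a Choices control's to_json_schema() might return;
# the exact structure is an assumption.
schema = {
    "type": "array",
    "items": {"type": "string", "enum": ["Positive", "Negative", "Neutral"]},
    "minItems": 1,
    "maxItems": 1,
}
sample = JSF(schema).generate()  # e.g. ["Neutral"]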
+    def generate_sample_annotation(self) -> Optional[dict]:
+        """Generates a sample annotation that is valid for this label config.
+
+        Example:
+            {'was_cancelled': False,
+             'ground_truth': False,
+             'lead_time': 0.0,
+             'result_count': 0,
+             'completed_by': -1,
+             'result': [{'id': 'b05da11d-3ffc-4657-8b8d-f5bc37cd59ac',
+                         'from_name': 'sentiment',
+                         'to_name': 'text',
+                         'type': 'choices',
+                         'value': {'choices': ['Negative']}}]}
+
+        NOTE: `id` field in result is not required when importing annotations; it will be generated automatically.
+        """
+        annotation = AnnotationValue(
+            completed_by=-1,  # annotator's user id
+            result=[
+                control.label(JSF(control.to_json_schema()).generate())
+                for control in self.controls
+            ]
+        )
+        annotation_dct = annotation.model_dump()
+        if self.validate_annotation(annotation_dct):
+            return annotation_dct
+        else:
+            logger.debug(f'Sample annotation {annotation_dct} failed validation for label config {self.config}')
+            return None
 
     #####
     ##### COMPATIBILITY LAYER
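Example usage of the two generators, assuming a LabelInterface built from a labeling config string (the constructor call follows the SDK's documented pattern and is not shown in this diff):

from label_studio_sdk.label_interface import LabelInterface

config = '''
<View>
  <Text name="text" value="$text"/>
  <Choices name="sentiment" toName="text">
    <Choice value="Positive"/>
    <Choice value="Negative"/>
    <Choice value="Neutral"/>
  </Choices>
</View>
'''

li = LabelInterface(config)
prediction = li.generate_sample_prediction()  # dict like the docstring example, or None if validation fails
annotation = li.generate_sample_annotation()  # dict with completed_by=-1, or None if validation fails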
