-
Notifications
You must be signed in to change notification settings - Fork 8
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat: handling json file containing responses from a generative model (…
…#208) * feat: add text-generation as new model type, handle the new model type, set schema fields as optional, edit test * fix: ruff check * feat: add optional schema fields to models definition (sdk) * feat: set optional fields model schema (spark side) * handle json response * feat: editing json handling
- Loading branch information
Showing
16 changed files
with
908 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
46 changes: 46 additions & 0 deletions
46
api/alembic/versions/e72dc7aaa4cc_add_dataset_and_metrics_completion.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,46 @@ | ||
"""add_dataset_and_metrics_completion | ||
Revision ID: e72dc7aaa4cc | ||
Revises: dccb82489f4d | ||
Create Date: 2024-12-11 13:33:38.759485 | ||
""" | ||
from typing import Sequence, Union, Text | ||
|
||
from alembic import op | ||
import sqlalchemy as sa | ||
from app.db.tables.commons.json_encoded_dict import JSONEncodedDict | ||
|
||
# revision identifiers, used by Alembic.
revision: str = 'e72dc7aaa4cc'  # this migration's unique id
down_revision: Union[str, None] = 'dccb82489f4d'  # parent revision in the migration chain
branch_labels: Union[str, Sequence[str], None] = None  # no named branches
depends_on: Union[str, Sequence[str], None] = None  # no cross-branch dependencies
|
||
|
||
def upgrade() -> None:
    """Create the `completion_dataset` and `completion_dataset_metrics` tables.

    `completion_dataset` tracks an uploaded completion-dataset file per model;
    `completion_dataset_metrics` stores the JSON-encoded model-quality metrics
    computed for one such dataset (1:N via COMPLETION_UUID).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('completion_dataset',
    sa.Column('UUID', sa.UUID(), nullable=False),
    sa.Column('MODEL_UUID', sa.UUID(), nullable=False),
    sa.Column('PATH', sa.VARCHAR(), nullable=False),
    sa.Column('DATE', sa.TIMESTAMP(timezone=True), nullable=False),
    sa.Column('STATUS', sa.VARCHAR(), nullable=False),
    sa.ForeignKeyConstraint(['MODEL_UUID'], ['model.UUID'], name=op.f('fk_completion_dataset_MODEL_UUID_model')),
    sa.PrimaryKeyConstraint('UUID', name=op.f('pk_completion_dataset'))
    )
    op.create_table('completion_dataset_metrics',
    sa.Column('UUID', sa.UUID(), nullable=False),
    sa.Column('COMPLETION_UUID', sa.UUID(), nullable=False),
    # Fix: the original passed `Text()` imported from `typing`, which is just an
    # alias of `str` — calling it yields '' rather than a SQLAlchemy type.
    # `astext_type` expects a SQLAlchemy TypeEngine, i.e. sa.Text().
    sa.Column('MODEL_QUALITY', JSONEncodedDict(astext_type=sa.Text()), nullable=True),
    sa.ForeignKeyConstraint(['COMPLETION_UUID'], ['completion_dataset.UUID'], name=op.f('fk_completion_dataset_metrics_COMPLETION_UUID_completion_dataset')),
    sa.PrimaryKeyConstraint('UUID', name=op.f('pk_completion_dataset_metrics'))
    )
    # ### end Alembic commands ###
|
||
|
||
def downgrade() -> None:
    """Drop the completion tables created by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in dependency order: the metrics table holds a FK into
    # completion_dataset, so it must go first.
    for table_name in ('completion_dataset_metrics', 'completion_dataset'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,87 @@ | ||
import re | ||
from typing import List, Optional | ||
from uuid import UUID | ||
|
||
from fastapi_pagination import Page, Params | ||
from fastapi_pagination.ext.sqlalchemy import paginate | ||
from sqlalchemy import asc, desc | ||
from sqlalchemy.future import select as future_select | ||
|
||
from app.db.database import Database | ||
from app.db.tables.completion_dataset_table import CompletionDataset | ||
from app.models.dataset_dto import OrderType | ||
|
||
|
||
class CompletionDatasetDAO:
    """Data-access object for `completion_dataset` rows."""

    def __init__(self, database: Database) -> None:
        self.db = database

    def insert_completion_dataset(
        self, completion_dataset: CompletionDataset
    ) -> CompletionDataset:
        """Persist a new completion dataset and return it with DB defaults populated."""
        with self.db.begin_session() as session:
            session.add(completion_dataset)
            session.flush()
            return completion_dataset

    def get_completion_dataset_by_model_uuid(
        self, model_uuid: UUID, completion_uuid: UUID
    ) -> Optional[CompletionDataset]:
        """Return the dataset matching both model and completion UUID, or None."""
        with self.db.begin_session() as session:
            return (
                session.query(CompletionDataset)
                .where(
                    CompletionDataset.model_uuid == model_uuid,
                    CompletionDataset.uuid == completion_uuid,
                )
                .one_or_none()
            )

    def get_latest_completion_dataset_by_model_uuid(
        self, model_uuid: UUID
    ) -> Optional[CompletionDataset]:
        """Return the most recently dated dataset for the model, or None."""
        with self.db.begin_session() as session:
            return (
                session.query(CompletionDataset)
                .order_by(desc(CompletionDataset.date))
                .where(CompletionDataset.model_uuid == model_uuid)
                .limit(1)
                .one_or_none()
            )

    def get_all_completion_datasets_by_model_uuid(
        self,
        model_uuid: UUID,
    ) -> List[CompletionDataset]:
        """Return every dataset for the model, newest first.

        Fix: the original returned the un-executed Query object while being
        annotated as List[CompletionDataset]; `.all()` materializes the rows
        before the session context exits.
        """
        with self.db.begin_session() as session:
            return (
                session.query(CompletionDataset)
                .order_by(desc(CompletionDataset.date))
                .where(CompletionDataset.model_uuid == model_uuid)
                .all()
            )

    def get_all_completion_datasets_by_model_uuid_paginated(
        self,
        model_uuid: UUID,
        params: Params = Params(),
        order: OrderType = OrderType.ASC,
        sort: Optional[str] = None,
    ) -> Page[CompletionDataset]:
        """Return a page of the model's datasets, optionally sorted.

        `sort` is a camelCase field name from the API layer; it is converted
        to the snake_case mapped-attribute name before ordering.
        """
        def order_by_column_name(column_name):
            # camelCase -> snake_case, then resolve the mapped attribute.
            # Fix: use the idiomatic getattr() instead of the unbound
            # CompletionDataset.__getattribute__(CompletionDataset, ...) call.
            return getattr(
                CompletionDataset, re.sub('(?=[A-Z])', '_', column_name).lower()
            )

        with self.db.begin_session() as session:
            stmt = future_select(CompletionDataset).where(
                CompletionDataset.model_uuid == model_uuid
            )

            if sort:
                stmt = (
                    stmt.order_by(asc(order_by_column_name(sort)))
                    if order == OrderType.ASC
                    else stmt.order_by(desc(order_by_column_name(sort)))
                )

            return paginate(session, stmt, params)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
from uuid import uuid4 | ||
|
||
from sqlalchemy import UUID, Column, ForeignKey | ||
|
||
from app.db.dao.base_dao import BaseDAO | ||
from app.db.database import BaseTable, Reflected | ||
from app.db.tables.commons.json_encoded_dict import JSONEncodedDict | ||
|
||
|
||
class CompletionDatasetMetrics(Reflected, BaseTable, BaseDAO):
    """ORM mapping for `completion_dataset_metrics`: metrics computed for one
    completion dataset, stored as a JSON-encoded blob."""

    __tablename__ = 'completion_dataset_metrics'

    # Surrogate primary key, generated client-side via uuid4.
    uuid = Column(
        'UUID',
        UUID(as_uuid=True),
        nullable=False,
        default=uuid4,
        primary_key=True,
    )
    # FK to the completion_dataset row these metrics belong to.
    completion_uuid = Column(
        'COMPLETION_UUID',
        UUID(as_uuid=True),
        ForeignKey('completion_dataset.UUID'),
        nullable=False,
    )
    # JSON-encoded model-quality metrics; nullable until computation completes.
    model_quality = Column('MODEL_QUALITY', JSONEncodedDict, nullable=True)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
from uuid import uuid4 | ||
|
||
from sqlalchemy import TIMESTAMP, UUID, VARCHAR, Column, ForeignKey | ||
|
||
from app.db.dao.base_dao import BaseDAO | ||
from app.db.database import BaseTable, Reflected | ||
from app.models.job_status import JobStatus | ||
|
||
|
||
class CompletionDataset(Reflected, BaseTable, BaseDAO):
    """ORM mapping for `completion_dataset`: one uploaded completion-dataset
    file belonging to a model, with its import status."""

    __tablename__ = 'completion_dataset'

    # Surrogate primary key, generated client-side via uuid4.
    uuid = Column(
        'UUID',
        UUID(as_uuid=True),
        nullable=False,
        default=uuid4,
        primary_key=True,
    )
    # FK to the owning model.
    model_uuid = Column(
        'MODEL_UUID', UUID(as_uuid=True), ForeignKey('model.UUID'), nullable=False
    )
    # Storage path of the uploaded file.
    path = Column('PATH', VARCHAR, nullable=False)
    # Upload/import timestamp (timezone-aware).
    date = Column('DATE', TIMESTAMP(timezone=True), nullable=False)
    # Job status string; new rows start in the IMPORTING state.
    status = Column('STATUS', VARCHAR, nullable=False, default=JobStatus.IMPORTING)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
from typing import Dict, List, Optional | ||
|
||
from pydantic import BaseModel, RootModel, model_validator | ||
|
||
|
||
class TokenLogProbs(BaseModel):
    """Log-probability record for a single generated token (OpenAI-style)."""

    token: str  # the token text
    bytes: List[int]  # raw UTF-8 byte values of the token
    logprob: float  # log-probability of this token
    top_logprobs: List[Dict[str, float]]  # alternative tokens with their logprobs
|
||
|
||
class LogProbs(BaseModel):
    """Per-token log-probabilities for one choice of a completion."""

    content: List[TokenLogProbs]  # one entry per generated token
    refusal: Optional[str] = None  # refusal message, when present
|
||
|
||
class Message(BaseModel):
    """Assistant message payload inside a completion choice."""

    content: str  # generated text
    refusal: Optional[str] = None  # refusal message, when present
    role: str  # e.g. the message author role as provided in the payload
    # NOTE: a mutable default is safe here — pydantic copies field defaults
    # per instance, unlike plain function defaults.
    tool_calls: List = []
    parsed: Optional[dict] = None  # structured-output parse result, if any
|
||
|
||
class Choice(BaseModel):
    """One candidate completion within a response.

    `logprobs` is declared Optional so the field may be absent in the raw
    JSON, but the validator below rejects None — metrics computation
    requires log-probabilities, and this yields a clearer error message
    than a plain required-field failure.
    """

    finish_reason: str
    index: int
    logprobs: Optional[LogProbs] = None
    message: Message

    @model_validator(mode='after')
    def validate_logprobs(self):
        # Deliberately reject missing logprobs with a domain-specific message.
        if self.logprobs is None:
            raise ValueError(
                "the 'logprobs' field cannot be empty, metrics could not be computed."
            )
        return self
|
||
|
||
class UsageDetails(BaseModel):
    """Token-count breakdown for either the prompt or the completion side."""

    accepted_prediction_tokens: int = 0
    reasoning_tokens: int = 0
    rejected_prediction_tokens: int = 0
    audio_tokens: Optional[int] = None  # absent for text-only responses
    cached_tokens: Optional[int] = None  # absent when no prompt caching occurred
|
||
|
||
class Usage(BaseModel):
    """Aggregate token usage for one completion response."""

    completion_tokens: int
    prompt_tokens: int
    total_tokens: int
    completion_tokens_details: UsageDetails
    prompt_tokens_details: Optional[UsageDetails] = None  # not always reported
|
||
|
||
class Completion(BaseModel):
    """Top-level completion response object (OpenAI-style chat completion)."""

    id: str  # response identifier
    choices: List[Choice]  # candidate completions
    created: int  # unix timestamp of creation
    model: str  # model name that produced the response
    object: str  # payload type discriminator
    system_fingerprint: str
    usage: Usage  # token accounting
|
||
|
||
class CompletionResponses(RootModel[List[Completion]]):
    """Root model for a JSON file holding one or many completion payloads."""

    @model_validator(mode='before')
    @classmethod
    def handle_single_completion(cls, data):
        """Normalize raw input: a lone completion dict becomes a one-item list."""
        normalized = [data] if isinstance(data, dict) else data
        if not isinstance(normalized, list):
            raise ValueError(
                'Input file must be a list of completion json or a single completion json'
            )
        return normalized
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.