diff --git a/.vscode/settings.json b/.vscode/settings.json index 6627e3a..09c683f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,6 +2,5 @@ "isort.args": ["--profile", "black"], "[python]": { "editor.defaultFormatter": "ms-python.black-formatter" - }, - "python.formatting.provider": "none" + } } diff --git a/Makefile b/Makefile index 633fe65..0e6b701 100644 --- a/Makefile +++ b/Makefile @@ -5,9 +5,11 @@ COMPOSE_RUN_TOOLING = UID=${UID} GID=${GID} docker compose -f docker-compose.yml COMPOSE_APP_DEV = docker compose -f docker-compose.yml -f docker-compose.override.yml help: ## Show this help - @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' + @printf "\nUSAGE: make [command] \n\n" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' + @printf '\n' -env: ## Switch to an environment config +env: ## Switch to an environment config @mkdir -p config/active rm -rf config/active/* cp -r config/${ENV}/* config/active/ @@ -57,5 +59,11 @@ build-dev: ## Builds development docker images start-dev: ## Starts development environment $(COMPOSE_APP_DEV) up -d -clean-dev: ## Cleans development environment +clean-dev: ## Cleans development environment containers $(COMPOSE_APP_DEV) down --remove-orphans + +reset-dev: ## Resets config, data and containers to default states + make env ENV=dev + $(COMPOSE_RUN_TOOLING) python manage.py flush --no-input + $(COMPOSE_APP_DEV) down --remove-orphans --volumes + make start-dev diff --git a/profiles/admin.py b/profiles/admin.py index 27f9a51..408be57 100644 --- a/profiles/admin.py +++ b/profiles/admin.py @@ -1,6 +1,16 @@ from django.contrib import admin -from profiles.models import Beatmap, OsuUser, Score, ScoreFilter, UserStats +from profiles.models import ( + Beatmap, + DifficultyCalculation, + DifficultyValue, + OsuUser, + PerformanceCalculation, + PerformanceValue, + Score, + ScoreFilter, + UserStats, +) class UserStatsAdmin(admin.ModelAdmin): @@ -13,6 +23,16 @@ class BeatmapAdmin(admin.ModelAdmin): raw_id_fields = ("creator",) +class DifficultyCalculationAdmin(admin.ModelAdmin): + model = DifficultyCalculation + raw_id_fields = ("beatmap",) + + +class DifficultyValueAdmin(admin.ModelAdmin): + model = DifficultyValue + raw_id_fields = ("calculation",) + + class ScoreAdmin(admin.ModelAdmin): model = Score raw_id_fields = ( @@ -21,8 +41,25 @@ class ScoreAdmin(admin.ModelAdmin): model = Score raw_id_fields = ( +class PerformanceCalculationAdmin(admin.ModelAdmin): + model = PerformanceCalculation + raw_id_fields = ( + "score", + "difficulty_calculation", + ) + + +class PerformanceValueAdmin(admin.ModelAdmin): + model = PerformanceValue + raw_id_fields = ("calculation",) + + admin.site.register(OsuUser) admin.site.register(UserStats, UserStatsAdmin) admin.site.register(Beatmap, BeatmapAdmin) +admin.site.register(DifficultyCalculation, DifficultyCalculationAdmin) +admin.site.register(DifficultyValue, DifficultyValueAdmin) admin.site.register(Score, ScoreAdmin) +admin.site.register(PerformanceCalculation, PerformanceCalculationAdmin) +admin.site.register(PerformanceValue, PerformanceValueAdmin) admin.site.register(ScoreFilter) diff --git a/profiles/management/commands/calculationstatus.py b/profiles/management/commands/calculationstatus.py new file mode 100644 index 0000000..4089b8b --- /dev/null +++ b/profiles/management/commands/calculationstatus.py @@ -0,0 +1,106 @@ +from django.core.management.base import BaseCommand +from django.db.models
import QuerySet + +from common.osu.difficultycalculator import DifficultyCalculator +from common.osu.enums import Gamemode +from profiles.models import Beatmap, Score + + +class Command(BaseCommand): + help = "Displays current db calculation status" + + def add_arguments(self, parser): + parser.add_argument( + "--v2", + action="store_true", + help="Use new difficulty and performance models", + ) + + def handle(self, *args, **options): + # the v2 flag is used to determine whether to use the new difficulty and performance models + v2 = options["v2"] + + # TODO: iterate over supported gamemodes + gamemode = Gamemode.STANDARD + + self.stdout.write( + f"Gamemode: {Gamemode(gamemode).name}\n" + f"Difficulty Calculator Engine: {DifficultyCalculator.engine()}\n" + f"Difficulty Calculator Version: {DifficultyCalculator.version()}\n" + ) + + if v2: + beatmaps = Beatmap.objects.filter(gamemode=gamemode) + outdated_beatmap_count = self.get_outdated_beatmap_count_v2(beatmaps) + + scores = Score.objects.filter(gamemode=gamemode) + outdated_score_count = self.get_outdated_score_count_v2(scores) + else: + beatmaps = Beatmap.objects.filter(gamemode=gamemode) + outdated_beatmap_count = self.get_outdated_beatmap_count(beatmaps) + + scores = Score.objects.filter(gamemode=gamemode) + outdated_score_count = self.get_outdated_score_count(scores) + + beatmap_count = beatmaps.count() + up_to_date_beatmap_count = beatmap_count - outdated_beatmap_count + + score_count = scores.count() + up_to_date_score_count = score_count - outdated_score_count + + if up_to_date_beatmap_count == 0: + beatmap_output_style = self.style.ERROR + elif up_to_date_beatmap_count == beatmap_count: + beatmap_output_style = self.style.SUCCESS + else: + beatmap_output_style = self.style.WARNING + + if up_to_date_score_count == 0: + score_output_style = self.style.ERROR + elif up_to_date_score_count == score_count: + score_output_style = self.style.SUCCESS + else: + score_output_style = self.style.WARNING + + self.stdout.write( + beatmap_output_style( + f"Up-to-date Beatmaps: {up_to_date_beatmap_count} / {beatmap_count} ({(up_to_date_beatmap_count / beatmap_count) * 100:.2f}%)" + ) + ) + self.stdout.write( + score_output_style( + f"Up-to-date Scores: {up_to_date_score_count} / {score_count} ({(up_to_date_score_count / score_count) * 100:.2f}%)" + ) + ) + + def get_outdated_beatmap_count(self, beatmaps: QuerySet[Beatmap]): + beatmaps_to_recalculate = beatmaps.exclude( + difficulty_calculator_engine=DifficultyCalculator.engine(), + difficulty_calculator_version=DifficultyCalculator.version(), + ).order_by("pk") + + return beatmaps_to_recalculate.count() + + def get_outdated_score_count(self, scores: QuerySet[Score]): + scores_to_recalculate = scores.exclude( + difficulty_calculator_engine=DifficultyCalculator.engine(), + difficulty_calculator_version=DifficultyCalculator.version(), + ).order_by("pk") + + return scores_to_recalculate.count() + + def get_outdated_beatmap_count_v2(self, beatmaps: QuerySet[Beatmap]): + beatmaps_to_recalculate = beatmaps.exclude( + difficulty_calculations__calculator_engine=DifficultyCalculator.engine(), + difficulty_calculations__calculator_version=DifficultyCalculator.version(), + ) + + return beatmaps_to_recalculate.count() + + def get_outdated_score_count_v2(self, scores: QuerySet[Score]): + scores_to_recalculate = scores.exclude( + performance_calculations__calculator_engine=DifficultyCalculator.engine(), + performance_calculations__calculator_version=DifficultyCalculator.version(), + ) + + return 
scores_to_recalculate.count() diff --git a/profiles/management/commands/recalculate.py b/profiles/management/commands/recalculate.py index 9039d7b..34db1ce 100644 --- a/profiles/management/commands/recalculate.py +++ b/profiles/management/commands/recalculate.py @@ -2,13 +2,22 @@ from django.core.management.base import BaseCommand from django.core.paginator import Paginator -from django.db.models import QuerySet +from django.db import transaction +from django.db.models import Count, QuerySet from tqdm import tqdm from common.osu.difficultycalculator import DifficultyCalculator -from common.osu.enums import Gamemode +from common.osu.enums import Gamemode, Mods from leaderboards.models import Membership -from profiles.models import Beatmap, Score, UserStats +from profiles.models import ( + Beatmap, + DifficultyCalculation, + DifficultyValue, + PerformanceCalculation, + PerformanceValue, + Score, + UserStats, +) class Command(BaseCommand): @@ -21,10 +30,17 @@ def add_arguments(self, parser): action="store_true", help="Force recalculation of beatmaps and scores even if already up to date", ) + parser.add_argument( + "--v2", + action="store_true", + help="Use new difficulty and performance models", + ) def handle(self, *args, **options): gamemode = options["gamemode"][0] force = options["force"] + # the v2 flag is used to determine whether to use the new difficulty and performance models + v2 = options["v2"] if gamemode != Gamemode.STANDARD: self.stdout.write( @@ -40,29 +56,31 @@ def handle(self, *args, **options): f"Difficulty Calculator Version: {DifficultyCalculator.version()}\n" ) - # Recalculate beatmaps - beatmaps = Beatmap.objects.filter(gamemode=gamemode) - - self.recalculate_beatmaps(beatmaps, force) + if v2: + # Recalculate beatmaps + beatmaps = Beatmap.objects.filter(gamemode=gamemode) + self.recalculate_beatmaps_v2(beatmaps, force) - # Recalculate scores - - scores = Score.objects.filter(gamemode=gamemode) + # Recalculate scores + scores = Score.objects.filter(gamemode=gamemode) + self.recalculate_scores_v2(scores, force) + else: + # Recalculate beatmaps + beatmaps = Beatmap.objects.filter(gamemode=gamemode) + self.recalculate_beatmaps(beatmaps, force) - self.recalculate_scores(scores, force) + # Recalculate scores + scores = Score.objects.filter(gamemode=gamemode) + self.recalculate_scores(scores, force) # Recalculate user stats - all_user_stats = UserStats.objects.filter(gamemode=gamemode) - self.recalculate_user_stats(all_user_stats) # Recalculate memberships - memberships = Membership.objects.select_related("leaderboard").filter( leaderboard__gamemode=gamemode ) - self.recalculate_memberships(memberships) def recalculate_beatmap_page(self, page: Iterable[Beatmap], progress_bar: tqdm): @@ -177,6 +195,210 @@ def recalculate_scores(self, scores: QuerySet[Score], force: bool = False): ) ) + def recalculate_beatmaps_v2(self, beatmaps: QuerySet[Beatmap], force: bool = False): + if force: + self.stdout.write(f"Forcing recalculation of all beatmaps...") + + paginator = Paginator(beatmaps.order_by("pk"), per_page=2000) + + with tqdm(desc="Beatmaps", total=beatmaps.count(), smoothing=0) as pbar: + for page in paginator: + self.recalculate_beatmap_page_v2(page, pbar) + else: + beatmaps_to_recalculate = beatmaps.exclude( + difficulty_calculations__calculator_engine=DifficultyCalculator.engine(), + difficulty_calculations__calculator_version=DifficultyCalculator.version(), + ) + + if beatmaps_to_recalculate.count() == 0: + self.stdout.write(f"All {beatmaps.count()} beatmaps already up to 
date") + return + + count_up_to_date = beatmaps.count() - beatmaps_to_recalculate.count() + + if count_up_to_date > 0: + self.stdout.write( + f"Found {count_up_to_date} beatmaps already up to date. Resuming..." + ) + + with tqdm( + desc="Beatmaps", + total=beatmaps.count(), + initial=count_up_to_date, + smoothing=0, + ) as pbar: + while len(page := beatmaps_to_recalculate[:2000]) > 0: + self.recalculate_beatmap_page_v2(page, pbar) + + self.stdout.write( + self.style.SUCCESS( + f"Successfully updated {beatmaps.count()} beatmaps' difficulty values" + ) + ) + + @transaction.atomic + def recalculate_beatmap_page_v2(self, page: Iterable[Beatmap], progress_bar: tqdm): + calculations = [] + beatmap_ids = [] + for beatmap in page: + calculations.append( + DifficultyCalculation( + beatmap_id=beatmap.id, + mods=Mods.NONE, + calculator_engine=DifficultyCalculator.engine(), + calculator_version=DifficultyCalculator.version(), + ) + ) + beatmap_ids.append(beatmap.id) + + # Create calculations + DifficultyCalculation.objects.bulk_create(calculations, ignore_conflicts=True) + calculations = DifficultyCalculation.objects.filter( + beatmap_id__in=beatmap_ids, + mods=Mods.NONE, + calculator_engine=DifficultyCalculator.engine(), + calculator_version=DifficultyCalculator.version(), + ) + + # Perform calculations + values = [] + for calculation in calculations: + values.extend(calculation.calculate_difficulty_values(DifficultyCalculator)) + progress_bar.update() + + # Create values + DifficultyValue.objects.bulk_create( + values, + update_conflicts=True, + update_fields=["value"], + unique_fields=["calculation_id", "name"], + ) + + def recalculate_scores_v2(self, scores: QuerySet[Score], force: bool = False): + if force: + self.stdout.write(f"Forcing recalculation of all scores...") + + scores_to_recalculate = scores + initial = 0 + else: + scores_to_recalculate = scores.exclude( + performance_calculations__calculator_engine=DifficultyCalculator.engine(), + performance_calculations__calculator_version=DifficultyCalculator.version(), + ) + + if scores_to_recalculate.count() == 0: + self.stdout.write(f"All {scores.count()} scores already up to date") + return + + count_up_to_date = scores.count() - scores_to_recalculate.count() + + if count_up_to_date > 0: + self.stdout.write( + f"Found {count_up_to_date} scores already up to date. Resuming..." 
+ ) + + initial = count_up_to_date + + unique_beatmaps = ( + scores_to_recalculate.values("beatmap_id", "mods") + .annotate(count=Count("*")) + .order_by("-count") + ) + + with tqdm( + desc="Scores", + total=scores.count(), + initial=initial, + smoothing=0, + ) as pbar: + for unique_beatmap in unique_beatmaps: + unique_beatmap_scores = scores_to_recalculate.filter( + beatmap_id=unique_beatmap["beatmap_id"], mods=unique_beatmap["mods"] + ) + self.recalculate_scores_for_unique_beatmap_v2( + unique_beatmap["beatmap_id"], + unique_beatmap["mods"], + unique_beatmap_scores, + pbar, + ) + + self.stdout.write( + self.style.SUCCESS( + f"Successfully updated {scores.count()} scores' performance values" + ) + ) + + @transaction.atomic + def recalculate_scores_for_unique_beatmap_v2( + self, beatmap_id: int, mods: int, scores: Iterable[Score], progress_bar: tqdm + ): + # Validate all scores are of same beatmap/mods + for score in scores: + if score.beatmap_id != beatmap_id or score.mods != mods: + raise Exception( + f"Score {score.id} does not match beatmap {beatmap_id} and mods {mods}" + ) + + # Create difficulty calculation + difficulty_calculation, _ = DifficultyCalculation.objects.get_or_create( + beatmap_id=beatmap_id, + mods=mods, + calculator_engine=DifficultyCalculator.engine(), + calculator_version=DifficultyCalculator.version(), + ) + + # Do difficulty calculation + difficulty_values = difficulty_calculation.calculate_difficulty_values( + DifficultyCalculator + ) + DifficultyValue.objects.bulk_create( + difficulty_values, + update_conflicts=True, + update_fields=["value"], + unique_fields=["calculation_id", "name"], + ) + + score_dict = {} + performance_calculations = [] + for score in scores: + performance_calculations.append( + PerformanceCalculation( + score_id=score.id, + difficulty_calculation_id=difficulty_calculation.id, + calculator_engine=DifficultyCalculator.engine(), + calculator_version=DifficultyCalculator.version(), + ) + ) + score_dict[score.id] = score + + # Create calculations + PerformanceCalculation.objects.bulk_create( + performance_calculations, ignore_conflicts=True + ) + performance_calculations = PerformanceCalculation.objects.filter( + score_id__in=score_dict.keys(), + calculator_engine=DifficultyCalculator.engine(), + calculator_version=DifficultyCalculator.version(), + ) + + # Perform calculations + values = [] + for calculation in performance_calculations: + values.extend( + calculation.calculate_performance_values( + score_dict[calculation.score_id], DifficultyCalculator + ) + ) + progress_bar.update() + + # Create values + PerformanceValue.objects.bulk_create( + values, + update_conflicts=True, + update_fields=["value"], + unique_fields=["calculation_id", "name"], + ) + def recalculate_user_stats(self, all_user_stats: QuerySet[UserStats]): paginator = Paginator(all_user_stats.order_by("pk"), per_page=2000) diff --git a/profiles/migrations/0016_alter_userstats_extra_pp_and_more.py b/profiles/migrations/0016_alter_userstats_extra_pp_and_more.py new file mode 100644 index 0000000..6b8c329 --- /dev/null +++ b/profiles/migrations/0016_alter_userstats_extra_pp_and_more.py @@ -0,0 +1,50 @@ +# Generated by Django 4.2.3 on 2023-11-23 06:38 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ( + "profiles", + "0015_alter_beatmap_max_combo_alter_score_difficulty_total_and_more", + ), + ] + + operations = [ + migrations.AlterField( + model_name="userstats", + name="extra_pp", + field=models.FloatField(default=0), + ), 
+ migrations.AlterField( + model_name="userstats", + name="score_style_accuracy", + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name="userstats", + name="score_style_ar", + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name="userstats", + name="score_style_bpm", + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name="userstats", + name="score_style_cs", + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name="userstats", + name="score_style_length", + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name="userstats", + name="score_style_od", + field=models.FloatField(default=0), + ), + ] diff --git a/profiles/migrations/0017_difficultycalculation_performancecalculation_and_more.py b/profiles/migrations/0017_difficultycalculation_performancecalculation_and_more.py new file mode 100644 index 0000000..0e1ef32 --- /dev/null +++ b/profiles/migrations/0017_difficultycalculation_performancecalculation_and_more.py @@ -0,0 +1,127 @@ +# Generated by Django 4.2.3 on 2023-11-24 05:20 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("profiles", "0016_alter_userstats_extra_pp_and_more"), + ] + + operations = [ + migrations.CreateModel( + name="DifficultyCalculation", + fields=[ + ("id", models.BigAutoField(primary_key=True, serialize=False)), + ("mods", models.IntegerField()), + ("calculator_engine", models.CharField(max_length=50)), + ("calculator_version", models.CharField(max_length=50)), + ( + "beatmap", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="difficulty_calculations", + to="profiles.beatmap", + ), + ), + ], + ), + migrations.CreateModel( + name="PerformanceCalculation", + fields=[ + ("id", models.BigAutoField(primary_key=True, serialize=False)), + ("calculator_engine", models.CharField(max_length=50)), + ("calculator_version", models.CharField(max_length=50)), + ( + "difficulty_calculation", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="performance_calculations", + to="profiles.difficultycalculation", + ), + ), + ( + "score", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="performance_calculations", + to="profiles.score", + ), + ), + ], + ), + migrations.CreateModel( + name="DifficultyValue", + fields=[ + ("id", models.BigAutoField(primary_key=True, serialize=False)), + ("name", models.CharField(max_length=20)), + ("value", models.FloatField()), + ( + "calculation", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="difficulty_values", + to="profiles.difficultycalculation", + ), + ), + ], + options={ + "indexes": [ + models.Index(fields=["value"], name="profiles_di_value_6b1f33_idx") + ], + }, + ), + migrations.CreateModel( + name="PerformanceValue", + fields=[ + ("id", models.BigAutoField(primary_key=True, serialize=False)), + ("name", models.CharField(max_length=20)), + ("value", models.FloatField()), + ( + "calculation", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="performance_calculations", + to="profiles.performancecalculation", + ), + ), + ], + options={ + "indexes": [ + models.Index(fields=["value"], name="profiles_pe_value_a6a611_idx") + ], + }, + ), + migrations.AddConstraint( + model_name="performancevalue", + constraint=models.UniqueConstraint( + fields=("calculation_id", 
"name"), name="unique_performance_value" + ), + ), + migrations.AddConstraint( + model_name="performancecalculation", + constraint=models.UniqueConstraint( + fields=("score_id", "calculator_engine", "calculator_version"), + name="unique_performance_calculation", + ), + ), + migrations.AddConstraint( + model_name="difficultyvalue", + constraint=models.UniqueConstraint( + fields=("calculation_id", "name"), name="unique_difficulty_value" + ), + ), + migrations.AddConstraint( + model_name="difficultycalculation", + constraint=models.UniqueConstraint( + fields=( + "beatmap_id", + "mods", + "calculator_engine", + "calculator_version", + ), + name="unique_difficulty_calculation", + ), + ), + ] diff --git a/profiles/models.py b/profiles/models.py index 99f22c4..a548e73 100644 --- a/profiles/models.py +++ b/profiles/models.py @@ -75,13 +75,13 @@ class UserStats(models.Model): count_rank_a = models.IntegerField() # osu!chan calculated data - extra_pp = models.FloatField() - score_style_accuracy = models.FloatField() - score_style_bpm = models.FloatField() - score_style_cs = models.FloatField() - score_style_ar = models.FloatField() - score_style_od = models.FloatField() - score_style_length = models.FloatField() + extra_pp = models.FloatField(default=0) + score_style_accuracy = models.FloatField(default=0) + score_style_bpm = models.FloatField(default=0) + score_style_cs = models.FloatField(default=0) + score_style_ar = models.FloatField(default=0) + score_style_od = models.FloatField(default=0) + score_style_length = models.FloatField(default=0) # Relations user = models.ForeignKey(OsuUser, on_delete=models.CASCADE, related_name="stats") @@ -91,22 +91,52 @@ class UserStats(models.Model): objects = UserStatsQuerySet.as_manager() - def add_scores_from_data(self, score_data_list): + def add_scores_from_data(self, score_data_list: list[dict]): """ Adds a list of scores and their beatmaps from the passed score_data_list. 
(requires all dicts to have beatmap_id set along with usual score data) """ + # Remove unranked scores + # Only process "high scores" (highest scorev1 per mod per map per user) + # (need to make this distinction to prevent lazer scores from being treated as ranked) + ranked_score_data_list = [ + score_data + for score_data in score_data_list + if score_data.get("score_id", None) is not None + ] + + # Parse dates + for score_data in ranked_score_data_list: + score_data["date"] = datetime.strptime( + score_data["date"], "%Y-%m-%d %H:%M:%S" + ).replace(tzinfo=timezone.utc) + + # Remove potential duplicates from a top 100 play also being in the recent 50 + # Unique on date since we don't track score_id (not ideal but not much we can do) + seen_dates = set() + unique_score_data_list = [] + for score_data in ranked_score_data_list: + if score_data["date"] not in seen_dates: + seen_dates.add(score_data["date"]) + unique_score_data_list.append(score_data) + + # Remove scores which already exist in db for this user + score_dates = [s["date"] for s in unique_score_data_list] + existing_score_dates = self.scores.filter(date__in=score_dates).values_list( + "date", flat=True + ) + new_score_data_list = [] + for score_data in unique_score_data_list: + if score_data["date"] not in existing_score_dates: + new_score_data_list.append(score_data) + # Fetch beatmaps from database in bulk - beatmap_ids = [int(s["beatmap_id"]) for s in score_data_list] + beatmap_ids = [int(s["beatmap_id"]) for s in new_score_data_list] beatmaps = list(Beatmap.objects.filter(id__in=beatmap_ids)) beatmaps_to_create = [] - scores_from_data = [] - for score_data in score_data_list: - # Only process "high scores" (highest scorev1 per mod per map per user) (need to make this distinction to prevent lazer scores from being treated as real) - if score_data.get("score_id", None) is None: - continue - + scores_to_create = [] + for score_data in new_score_data_list: score = Score() # Update Score fields @@ -121,9 +151,7 @@ def add_scores_from_data(self, score_data_list): score.perfect = bool(int(score_data["perfect"])) score.mods = int(score_data["enabled_mods"]) score.rank = score_data["rank"] - score.date = datetime.strptime( - score_data["date"], "%Y-%m-%d %H:%M:%S" - ).replace(tzinfo=timezone.utc) + score.date = score_data["date"] # Update foreign keys # Search for beatmap in fetched, else create it @@ -210,76 +238,41 @@ def add_scores_from_data(self, score_data_list): # Process score score.process() - scores_from_data.append(score) + scores_to_create.append(score) - # Remove potential duplicates from a top 100 play also being in the recent 50 - scores_from_data = [ - score - for score in scores_from_data - if score == next(s for s in scores_from_data if s.date == score.date) - ] + # Bulk add and update beatmaps and scores + created_beatmaps = Beatmap.objects.bulk_create( + beatmaps_to_create, + ignore_conflicts=True, # potential race condition from two concurrent updates creating the same beatmap + ) + created_scores = Score.objects.bulk_create(scores_to_create) - # Process scores for user stats values - all_scores, scores_to_create = self.__process_scores(*scores_from_data) + # Recalculate with new scores added + self.recalculate() self.save() - # Update new scores with newly saved UserStats id - for score in scores_to_create: - score.user_stats_id = self.id - - # Bulk add and update beatmaps and scores - Beatmap.objects.bulk_create(beatmaps_to_create, ignore_conflicts=True) - Score.objects.bulk_create(scores_to_create) - # Return new scores - return scores_to_create + return created_scores def
recalculate(self): - self.__process_scores() - - def __process_scores(self, *new_scores): """ - Calculates pp totals (extra pp, nochoke pp) and scores style using unique maps, and returns all scores for UserStats and the scores that need to be added + Calculates pp totals (extra pp, nochoke pp) and scores style using unique maps """ # Fetch all scores currently in database and add to new_scores ensuring no duplicate scores - try: - database_scores = self.scores.select_related("beatmap").filter( + scores = ( + self.scores.select_related("beatmap") + .filter( beatmap__status__in=[ BeatmapStatus.RANKED, BeatmapStatus.APPROVED, BeatmapStatus.LOVED, ] ) - except ValueError: - database_scores = [] - - database_score_dates = [score.date for score in database_scores] - scores_to_create = [ - score for score in new_scores if score.date not in database_score_dates - ] - scores = [ - *[ - score - for score in scores_to_create - if score.beatmap.status - in [BeatmapStatus.RANKED, BeatmapStatus.APPROVED, BeatmapStatus.LOVED] - ], - *database_scores, - ] + .order_by("-performance_total") + ) - # if the user has no scores, zero out the fields if len(scores) == 0: - self.extra_pp = 0 - self.score_style_accuracy = 0 - self.score_style_bpm = 0 - self.score_style_length = 0 - self.score_style_cs = 0 - self.score_style_ar = 0 - self.score_style_od = 0 - return [], [] - - # Sort all scores by pp - scores.sort(key=lambda s: s.performance_total, reverse=True) + return # Filter to be unique on maps (cant use .unique_maps() because duplicate maps might come from new scores) # (also this 1 liner is really inefficient for some reason so lets do it the standard way) @@ -335,8 +328,6 @@ def __process_scores(self, *new_scores): / weighting_value ) - return scores, scores_to_create - def __str__(self): return f"{Gamemode(self.gamemode).name}: {self.user_id}" @@ -475,6 +466,100 @@ def __str__(self): ) +class DifficultyCalculation(models.Model): + """ + Model representing a difficulty calculation of an osu! 
beatmap + """ + + id = models.BigAutoField(primary_key=True) + + beatmap = models.ForeignKey( + Beatmap, on_delete=models.CASCADE, related_name="difficulty_calculations" + ) + + mods = models.IntegerField() + calculator_engine = models.CharField(max_length=50) + calculator_version = models.CharField(max_length=50) + + def calculate_difficulty_values( + self, difficulty_calculator: type[AbstractDifficultyCalculator] + ) -> list["DifficultyValue"]: + values = [] + beatmap_provider = BeatmapProvider() + beatmap_path = beatmap_provider.get_beatmap_file(self.beatmap_id) + with difficulty_calculator(beatmap_path) as calculator: + calculator.set_mods(self.mods) + calculator.calculate() + + values.append( + DifficultyValue( + calculation_id=self.id, + name="total", + value=calculator.difficulty_total, + ) + ) + + return values + + def __str__(self): + if self.mods == 0: + map_string = f"{self.beatmap_id}" + else: + map_string = f"{self.beatmap_id} +{utils.get_mods_string(self.mods)}" + + return f"{map_string}: {self.calculator_engine} ({self.calculator_version})" + + class Meta: + constraints = [ + # Difficulty values are unique on beatmap + mods + calculator_engine + calculator_version + # The implicit unique b-tree index on these columns is useful also + models.UniqueConstraint( + fields=[ + "beatmap_id", + "mods", + "calculator_engine", + "calculator_version", + ], + name="unique_difficulty_calculation", + ) + ] + + +class DifficultyValue(models.Model): + """ + Model representing a value of a difficulty calculation of an osu! beatmap + """ + + id = models.BigAutoField(primary_key=True) + + calculation = models.ForeignKey( + DifficultyCalculation, + on_delete=models.CASCADE, + related_name="difficulty_values", + ) + + name = models.CharField(max_length=20) + value = models.FloatField() + + def __str__(self): + return f"{self.calculation_id}: {self.name} ({self.value})" + + class Meta: + constraints = [ + # Difficulty values are unique on calculation + name + # The implicit unique b-tree index on these columns is useful also + models.UniqueConstraint( + fields=[ + "calculation_id", + "name", + ], + name="unique_difficulty_value", + ) + ] + + indexes = [models.Index(fields=["value"])] + + class ScoreQuerySet(models.QuerySet): def non_restricted(self): return self.filter(user_stats__user__disabled=False) @@ -739,6 +824,105 @@ class ScoreFilter(models.Model): highest_length = models.FloatField(null=True, blank=True) +class PerformanceCalculation(models.Model): + """ + Model representing a performance calculation of an osu! 
score + """ + + # TODO: consider using uuid to avoid bulk_create issue + id = models.BigAutoField(primary_key=True) + + score = models.ForeignKey( + Score, on_delete=models.CASCADE, related_name="performance_calculations" + ) + difficulty_calculation = models.ForeignKey( + DifficultyCalculation, + on_delete=models.CASCADE, + related_name="performance_calculations", + ) + + calculator_engine = models.CharField(max_length=50) + calculator_version = models.CharField(max_length=50) + + def calculate_performance_values( + self, score: Score, difficulty_calculator: type[AbstractDifficultyCalculator] + ) -> list["PerformanceValue"]: + values = [] + beatmap_provider = BeatmapProvider() + beatmap_path = beatmap_provider.get_beatmap_file(score.beatmap_id) + with difficulty_calculator(beatmap_path) as calculator: + calculator.set_mods(score.mods) + calculator.set_accuracy( + count_100=score.count_100, + count_50=score.count_50, + ) + calculator.set_misses(score.count_miss) + calculator.set_combo(score.best_combo) + calculator.calculate() + + values.append( + PerformanceValue( + calculation_id=self.id, + name="total", + value=calculator.performance_total, + ) + ) + + return values + + def __str__(self): + return f"{self.score_id}: {self.calculator_engine} ({self.calculator_version})" + + class Meta: + constraints = [ + # Performance values are unique on score + calculator_engine + calculator_version + # The implicit unique b-tree index on these columns is useful also + models.UniqueConstraint( + fields=[ + "score_id", + "calculator_engine", + "calculator_version", + ], + name="unique_performance_calculation", + ) + ] + + +class PerformanceValue(models.Model): + """ + Model representing a value of a performance calculation of an osu! score + """ + + id = models.BigAutoField(primary_key=True) + + calculation = models.ForeignKey( + PerformanceCalculation, + on_delete=models.CASCADE, + related_name="performance_calculations", + ) + + name = models.CharField(max_length=20) + value = models.FloatField() + + def __str__(self): + return f"{self.calculation_id}: {self.name} ({self.value})" + + class Meta: + constraints = [ + # Performance values are unique on calculation + name + # The implicit unique b-tree index on these columns is useful also + models.UniqueConstraint( + fields=[ + "calculation_id", + "name", + ], + name="unique_performance_value", + ) + ] + + indexes = [models.Index(fields=["value"])] + + # Custom lookups diff --git a/profiles/tasks.py b/profiles/tasks.py index f01c342..5317265 100644 --- a/profiles/tasks.py +++ b/profiles/tasks.py @@ -211,6 +211,8 @@ def update_user(user_id=None, username=None, gamemode: int = Gamemode.STANDARD): if score["rank"] != "F" ) + user_stats.save() + # Process and add scores user_stats.add_scores_from_data(score_data_list)
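Reviewer note (not part of the diff): the sketch below shows how the new v2 tables introduced above could be read back. The model names, field names, and the engine/version filtering mirror what this diff adds; the helper function names and the beatmap_id/score_id arguments are hypothetical, used only for illustration.

# Minimal sketch, assuming the models and constraints added in this diff.
from common.osu.difficultycalculator import DifficultyCalculator
from common.osu.enums import Mods
from profiles.models import DifficultyCalculation, DifficultyValue, PerformanceValue


def get_nomod_difficulty_total(beatmap_id: int):
    # One DifficultyCalculation row per (beatmap, mods, engine, version),
    # enforced by the unique_difficulty_calculation constraint.
    calculation = DifficultyCalculation.objects.filter(
        beatmap_id=beatmap_id,
        mods=Mods.NONE,
        calculator_engine=DifficultyCalculator.engine(),
        calculator_version=DifficultyCalculator.version(),
    ).first()
    if calculation is None:
        return None
    # Values are stored as named rows; "total" is the name written by
    # calculate_difficulty_values() in this diff.
    value = DifficultyValue.objects.filter(calculation=calculation, name="total").first()
    return value.value if value is not None else None


def get_performance_total(score_id: int):
    # PerformanceValue rows hang off PerformanceCalculation, which is unique per
    # (score, engine, version) via the unique_performance_calculation constraint.
    value = PerformanceValue.objects.filter(
        calculation__score_id=score_id,
        calculation__calculator_engine=DifficultyCalculator.engine(),
        calculation__calculator_version=DifficultyCalculator.version(),
        name="total",
    ).first()
    return value.value if value is not None else None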
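Reviewer note (not part of the diff): the new --v2 flag can also be exercised from a test or shell under a configured Django settings module. calculationstatus takes no arguments other than --v2 in this diff; recalculate accepts the same flag, but its positional gamemode argument is defined outside this diff, so it is not invoked here.

# Sketch only; assumes Django is set up (e.g. inside manage.py shell or a test).
from django.core.management import call_command

# Prints up-to-date beatmap/score counts against the new calculation tables;
# omitting v2 reports against the legacy per-row difficulty/performance columns.
call_command("calculationstatus", v2=True)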