feat(statistics): store ranked_score & total_score under classic scoring mode (#68)
* Initial plan

* feat(calculator): add classic score simulator and scoring mode support

  - Add ScoringMode enum with STANDARDISED and CLASSIC modes
  - Add scoring_mode configuration to game settings
  - Implement GetDisplayScore function in calculator.py
  - Add get_display_score method to Score model
  - Update score statistics to use display scores based on scoring mode

  Co-authored-by: MingxuanGame <68982190+MingxuanGame@users.noreply.github.com>

* fix(calculator): apply scoring mode to TotalScoreBestScore delete method

  - Update delete method to use display score for consistency
  - Ensures all UserStatistics modifications use configured scoring mode

  Co-authored-by: MingxuanGame <68982190+MingxuanGame@users.noreply.github.com>

* refactor(calculator): address code review feedback

  - Move MAX_SCORE constant to app/const.py
  - Implement is_basic() as method in HitResult enum
  - Move imports to top of file in Score model
  - Revert TotalScoreBestScore storage to use standardised score
  - Apply display score calculation in tools/recalculate.py
  - Keep display score usage in UserStatistics modifications

  Co-authored-by: MingxuanGame <68982190+MingxuanGame@users.noreply.github.com>

* chore(linter): auto fix by pre-commit hooks

* Don't use forward-ref for `ScoringMode`

* chore(linter): auto fix by pre-commit hooks

* fix(calculator): update HitResult usage in get_display_score and adjust ruleset value in PerformanceServerPerformanceCalculator

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: MingxuanGame <MingxuanGame@outlook.com>
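The commit message references a GetDisplayScore function in calculator.py, a MAX_SCORE constant in app/const.py, and a HitResult.is_basic() method, but the diff below only shows their call sites. For orientation, here is a minimal Python sketch of what such a helper could look like, assuming the conversion mirrors osu!(lazer)'s classic scoring formula (standardised score scaled by the squared count of basic judgements times a constant multiplier); the multiplier value and the BASIC_RESULTS set are assumptions, not code from this repository:

```python
from enum import Enum

MAX_SCORE = 1_000_000  # standardised score cap; the commit moves this constant to app/const.py
CLASSIC_SCORE_MULTIPLIER = 36  # assumption: the multiplier used by osu!(lazer)'s classic mode

# Assumption: which hit results count as "basic"; the commit implements this
# check as HitResult.is_basic() rather than a module-level set.
BASIC_RESULTS = {"miss", "meh", "ok", "good", "great", "perfect"}


class ScoringMode(str, Enum):
    STANDARDISED = "standardised"
    CLASSIC = "classic"


def get_display_score(
    ruleset_id: int,
    total_score: int,
    mode: ScoringMode,
    maximum_statistics: dict[str, int],
) -> int:
    """Convert a stored standardised total score into the configured display mode."""
    if mode == ScoringMode.STANDARDISED:
        return total_score

    # Classic mode scales the standardised score by the squared number of basic
    # judgements, so longer maps produce larger classic scores. ruleset_id is
    # accepted for parity with the call site; this sketch does not branch on it.
    object_count = sum(
        count for result, count in maximum_statistics.items() if result in BASIC_RESULTS
    )
    return round(total_score / MAX_SCORE * object_count**2 * CLASSIC_SCORE_MULTIPLIER)
```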
@@ -11,6 +11,7 @@ from app.calculator import (
     calculate_weighted_acc,
     calculate_weighted_pp,
     clamp,
+    get_display_score,
     pre_fetch_and_calculate_pp,
 )
 from app.config import settings
@@ -34,6 +35,7 @@ from app.models.score import (
     ScoreStatistics,
     SoloScoreSubmissionInfo,
 )
+from app.models.scoring_mode import ScoringMode
 from app.storage import StorageService
 from app.utils import utcnow
 
@@ -223,6 +225,26 @@ class Score(ScoreBase, table=True):
     def replay_filename(self) -> str:
         return f"replays/{self.id}_{self.beatmap_id}_{self.user_id}_lazer_replay.osr"
 
+    def get_display_score(self, mode: ScoringMode | None = None) -> int:
+        """
+        Get the display score for this score based on the scoring mode.
+
+        Args:
+            mode: The scoring mode to use. If None, uses the global setting.
+
+        Returns:
+            The display score in the requested scoring mode
+        """
+        if mode is None:
+            mode = settings.scoring_mode
+
+        return get_display_score(
+            ruleset_id=int(self.gamemode),
+            total_score=self.total_score,
+            mode=mode,
+            maximum_statistics=self.maximum_statistics,
+        )
+
     async def to_resp(self, session: AsyncSession, api_version: int) -> "ScoreResp | LegacyScoreResp":
         if api_version >= 20220705:
             return await ScoreResp.from_db(session, self)
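A small usage sketch of the new model method; only Score.get_display_score and ScoringMode come from the diff, while the wrapper function is hypothetical:

```python
from app.models.score import Score
from app.models.scoring_mode import ScoringMode


def summarize_display_scores(score: Score) -> dict[str, int]:
    """Illustrate the three ways the new method can be invoked."""
    return {
        "standardised": score.get_display_score(ScoringMode.STANDARDISED),  # == score.total_score
        "classic": score.get_display_score(ScoringMode.CLASSIC),
        "configured": score.get_display_score(),  # mode=None falls back to settings.scoring_mode
    }
```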
@@ -1108,12 +1130,17 @@ async def _process_statistics(
         raise ValueError(f"User {user.id} does not have statistics for mode {score.gamemode.value}")
 
     # pc, pt, tth, tts
-    statistics.total_score += score.total_score
-    difference = score.total_score - previous_score_best.total_score if previous_score_best else score.total_score
+    # Get display scores based on configured scoring mode
+    current_display_score = score.get_display_score()
+    previous_display_score = previous_score_best.score.get_display_score() if previous_score_best else 0
+
+    statistics.total_score += current_display_score
+    difference = current_display_score - previous_display_score
     logger.debug(
-        "Score delta computed for {score_id}: {difference}",
+        "Score delta computed for {score_id}: {difference} (display score in {mode} mode)",
         score_id=score.id,
         difference=difference,
+        mode=settings.scoring_mode,
     )
     if difference > 0 and score.passed and ranked:
         match score.rank:
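Note that both operands of the delta are converted, not just the new score: under classic mode the previous best must be expressed in classic units too, otherwise total_score and ranked_score would mix units. A toy check with hypothetical numbers (a 1,000-object map, classic multiplier 36 as assumed in the sketch above):

```python
# Hypothetical standardised totals for an old and a new personal best.
prev_standardised = 900_000
curr_standardised = 950_000

# Classic conversion: standardised / 1_000_000 * object_count**2 * 36.
prev_classic = round(prev_standardised / 1_000_000 * 1000**2 * 36)  # 32_400_000
curr_classic = round(curr_standardised / 1_000_000 * 1000**2 * 36)  # 34_200_000

# Same-mode delta, as the hunk computes it:
assert curr_classic - prev_classic == 1_800_000

# Mixed-mode delta, subtracting a standardised value from a classic one,
# would inflate the credited difference by more than an order of magnitude:
assert curr_classic - prev_standardised == 33_300_000
```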
@@ -56,8 +56,10 @@ class TotalScoreBestScore(SQLModel, table=True):
         )
         statistics = statistics.first()
         if statistics:
-            statistics.total_score -= self.total_score
-            statistics.ranked_score -= self.total_score
+            # Use display score from the referenced score for consistency with current scoring mode
+            display_score = self.score.get_display_score()
+            statistics.total_score -= display_score
+            statistics.ranked_score -= display_score
             statistics.level_current = calculate_score_to_level(statistics.total_score)
         match self.rank:
             case Rank.X:
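This hunk relies on the storage convention spelled out in the commit message ("Revert TotalScoreBestScore storage to use standardised score"): the TotalScoreBestScore row keeps a mode-independent standardised total_score, while UserStatistics totals are maintained in the configured display mode. That is why the delete path converts through the related Score row (self.score), which carries the maximum_statistics needed for classic conversion, instead of subtracting self.total_score directly. A sketch of the convention, with the module paths and function wrappers assumed for illustration:

```python
from app.models.score import Score
from app.models.score import TotalScoreBestScore  # assumed module path
from app.models.user import UserStatistics  # assumed module path


def apply_best(statistics: UserStatistics, best: TotalScoreBestScore, score: Score) -> None:
    """Writes: the leaderboard row stays standardised; statistics accumulate display scores."""
    best.total_score = score.total_score                 # standardised, mode-independent
    statistics.total_score += score.get_display_score()  # configured display mode
    statistics.ranked_score += score.get_display_score()


def revert_best(statistics: UserStatistics, best: TotalScoreBestScore) -> None:
    """Deletes: convert via the related Score row so the same amount that was
    added is subtracted, provided settings.scoring_mode is unchanged in between."""
    display_score = best.score.get_display_score()
    statistics.total_score -= display_score
    statistics.ranked_score -= display_score
```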