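"""O.N.G.E.K.I. score schema: best scores, playlogs, and tech counts."""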
from typing import Dict, List, Optional
from sqlalchemy import Column, Table, UniqueConstraint
from sqlalchemy.dialects.mysql import insert
from sqlalchemy.engine import Row
from sqlalchemy.schema import ForeignKey
from sqlalchemy.sql import select
from sqlalchemy.types import TIMESTAMP, Boolean, Float, Integer, String
from core.data.schema import BaseData, metadata
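
# Best score per chart, keyed by (user, musicId, level).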
score_best: Table = Table(
"ongeki_score_best",
metadata,
Column("id", Integer, primary_key=True, nullable=False),
2023-03-09 11:38:58 -05:00
Column(
"user",
ForeignKey("aime_user.id", ondelete="cascade", onupdate="cascade"),
nullable=False,
),
Column("musicId", Integer, nullable=False),
Column("level", Integer, nullable=False),
2023-03-09 11:38:58 -05:00
Column("playCount", Integer, nullable=False),
Column("techScoreMax", Integer, nullable=False),
Column("techScoreRank", Integer, nullable=False),
Column("battleScoreMax", Integer, nullable=False),
2023-03-09 11:38:58 -05:00
Column("battleScoreRank", Integer, nullable=False),
Column("maxComboCount", Integer, nullable=False),
Column("maxOverKill", Float, nullable=False),
Column("maxTeamOverKill", Float, nullable=False),
Column("isFullBell", Boolean, nullable=False),
Column("isFullCombo", Boolean, nullable=False),
Column("isAllBreake", Boolean, nullable=False),
Column("isLock", Boolean, nullable=False),
2024-05-27 06:42:08 +09:00
Column("clearStatus", Integer, nullable=False),
Column("isStoryWatched", Boolean, nullable=False),
2023-03-03 00:00:22 -05:00
Column("platinumScoreMax", Integer),
UniqueConstraint("user", "musicId", "level", name="ongeki_best_score_uk"),
2023-03-09 11:38:58 -05:00
mysql_charset="utf8mb4",
)
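
# Play log: one row per recorded play.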
playlog = Table(
"ongeki_score_playlog",
metadata,
Column("id", Integer, primary_key=True, nullable=False),
2023-03-09 11:38:58 -05:00
Column(
"user",
ForeignKey("aime_user.id", ondelete="cascade", onupdate="cascade"),
nullable=False,
),
Column("sortNumber", Integer),
Column("placeId", Integer),
Column("placeName", String(255)),
Column("playDate", TIMESTAMP),
Column("userPlayDate", TIMESTAMP),
Column("musicId", Integer),
Column("level", Integer),
Column("playKind", Integer),
Column("eventId", Integer),
Column("eventName", String(255)),
Column("eventPoint", Integer),
Column("playedUserId1", Integer),
Column("playedUserId2", Integer),
Column("playedUserId3", Integer),
Column("playedUserName1", String(8)),
Column("playedUserName2", String(8)),
Column("playedUserName3", String(8)),
Column("playedMusicLevel1", Integer),
Column("playedMusicLevel2", Integer),
Column("playedMusicLevel3", Integer),
Column("cardId1", Integer),
Column("cardId2", Integer),
Column("cardId3", Integer),
Column("cardLevel1", Integer),
Column("cardLevel2", Integer),
Column("cardLevel3", Integer),
Column("cardAttack1", Integer),
Column("cardAttack2", Integer),
Column("cardAttack3", Integer),
Column("bossCharaId", Integer),
Column("bossLevel", Integer),
Column("bossAttribute", Integer),
Column("clearStatus", Integer),
Column("techScore", Integer),
Column("techScoreRank", Integer),
Column("battleScore", Integer),
Column("battleScoreRank", Integer),
Column("maxCombo", Integer),
Column("judgeMiss", Integer),
Column("judgeHit", Integer),
Column("judgeBreak", Integer),
Column("judgeCriticalBreak", Integer),
Column("rateTap", Integer),
Column("rateHold", Integer),
Column("rateFlick", Integer),
Column("rateSideTap", Integer),
Column("rateSideHold", Integer),
Column("bellCount", Integer),
Column("totalBellCount", Integer),
Column("damageCount", Integer),
Column("overDamage", Integer),
Column("isTechNewRecord", Boolean),
Column("isBattleNewRecord", Boolean),
Column("isOverDamageNewRecord", Boolean),
Column("isFullCombo", Boolean),
Column("isFullBell", Boolean),
Column("isAllBreak", Boolean),
Column("playerRating", Integer),
Column("battlePoint", Integer),
2023-03-03 00:00:22 -05:00
Column("platinumScore", Integer),
Column("platinumScoreMax", Integer),
2023-03-09 11:38:58 -05:00
mysql_charset="utf8mb4",
)
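
# Per-level ALL BREAK / ALL BREAK+ counts, keyed by (user, levelId).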
tech_count = Table(
"ongeki_score_tech_count",
metadata,
Column("id", Integer, primary_key=True, nullable=False),
2023-03-09 11:38:58 -05:00
Column(
"user",
ForeignKey("aime_user.id", ondelete="cascade", onupdate="cascade"),
nullable=False,
),
Column("levelId", Integer, nullable=False),
Column("allBreakCount", Integer),
Column("allBreakPlusCount", Integer),
UniqueConstraint("user", "levelId", name="ongeki_tech_count_uk"),
2023-03-09 11:38:58 -05:00
mysql_charset="utf8mb4",
)


class OngekiScoreData(BaseData):
    async def get_tech_count(self, aime_id: int) -> Optional[List[Dict]]:
        sql = select(tech_count).where(tech_count.c.user == aime_id)
        result = await self.execute(sql)
        if result is None:
            return None
        return result.fetchall()

    async def put_tech_count(self, aime_id: int, tech_count_data: Dict) -> Optional[int]:
        tech_count_data["user"] = aime_id
        # Upsert: insert a new row, or update the existing (user, levelId) row in place.
        sql = insert(tech_count).values(**tech_count_data)
        conflict = sql.on_duplicate_key_update(**tech_count_data)

        result = await self.execute(conflict)
        if result is None:
            self.logger.warning(f"put_tech_count: Failed to update! aime_id: {aime_id}")
            return None
        return result.lastrowid
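    # nextIndex/maxCount pagination (PR #185): LIMIT/OFFSET is applied to a
    # subquery of distinct musicIds rather than to raw rows, so every
    # difficulty entry for a song lands on the same page and no per-user
    # state has to be buffered server-side.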
    async def get_best_scores(
        self,
        aime_id: int,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> Optional[List[Row]]:
        cond = score_best.c.user == aime_id

        if limit is None and offset is None:
            sql = (
                select(score_best)
                .where(cond)
                .order_by(score_best.c.musicId, score_best.c.level)
            )
        else:
            subq = (
                select(score_best.c.musicId)
                .distinct()
                .where(cond)
                .order_by(score_best.c.musicId)
            )

            if limit is not None:
                subq = subq.limit(limit)
            if offset is not None:
                subq = subq.offset(offset)

            subq = subq.subquery()

            sql = (
                select(score_best)
                .join(subq, score_best.c.musicId == subq.c.musicId)
                .where(cond)
                .order_by(score_best.c.musicId, score_best.c.level)
            )

        result = await self.execute(sql)
        if result is None:
            return None
        return result.fetchall()

    async def get_best_score(
        self, aime_id: int, song_id: int, chart_id: Optional[int] = None
    ) -> Optional[List[Dict]]:
        # Stub: always returns an empty list.
        return []

    async def put_best_score(self, aime_id: int, music_detail: Dict) -> Optional[int]:
        music_detail["user"] = aime_id
        sql = insert(score_best).values(**music_detail)
        conflict = sql.on_duplicate_key_update(**music_detail)

        result = await self.execute(conflict)
        if result is None:
            self.logger.warning(f"put_best_score: Failed to add score! aime_id: {aime_id}")
            return None
        return result.lastrowid

    async def put_playlog(self, aime_id: int, playlog_data: Dict) -> Optional[int]:
        playlog_data["user"] = aime_id
        sql = insert(playlog).values(**playlog_data)

        result = await self.execute(sql)
        if result is None:
            self.logger.warning(f"put_playlog: Failed to add playlog! aime_id: {aime_id}")
            return None

        return result.lastrowid