from __future__ import with_statement

import asyncio
import os
from pathlib import Path
import threading
from logging.config import fileConfig

import yaml
from alembic import context
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config

from core.config import CoreConfig
from core.data.schema.base import metadata

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up the loggers.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    raise Exception("Not implemented or configured!")

    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata, literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection: Connection) -> None:
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        # also detect column type and server default changes when
        # autogenerating migrations
        compare_type=True,
        compare_server_default=True,
    )

    with context.begin_transaction():
        context.run_migrations()


async def run_async_migrations() -> None:
    """In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    ini_section = config.get_section(config.config_ini_section)

    # apply any option overrides passed on the Alembic command line
    overrides = context.get_x_argument(as_dictionary=True)
    for override in overrides:
        ini_section[override] = overrides[override]
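    # For example (hypothetical values), the database URL could be
    # overridden per-invocation with Alembic's -x flag:
    #   alembic -x sqlalchemy.url=mysql+aiomysql://user:pass@host/artemis upgrade head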

    # load ARTEMIS's core config; ARTEMIS_CFG_DIR is taken from the
    # environment and resolved three directory levels up from the
    # working directory Alembic is invoked in
    core_config = CoreConfig()
    with (Path("../../..") / os.environ["ARTEMIS_CFG_DIR"] / "core.yaml").open(encoding="utf-8") as f:
        core_config.update(yaml.safe_load(f))

    connectable = async_engine_from_config(
        ini_section,
        poolclass=pool.NullPool,
        connect_args={
            "charset": "utf8mb4",
            "ssl": core_config.database.create_ssl_context_if_enabled(),
        },
    )

    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)

    await connectable.dispose()


def run_migrations_online():
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # there's no event loop
        asyncio.run(run_async_migrations())
    else:
        # there's currently an event loop, and trying to wait for a coroutine
        # to finish without using `await` is pretty wormy. nested event loops
        # are explicitly forbidden by asyncio.
        #
        # take the easy way out: spawn it in another thread.
        thread = threading.Thread(target=asyncio.run, args=(run_async_migrations(),))
        thread.start()
        thread.join()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()