3
0
mirror of synced 2024-11-27 17:00:55 +01:00

Initial commit

This commit is contained in:
CrazyRedMachine 2023-07-24 23:48:07 +02:00
parent 731b747525
commit 329c6aac3d
16 changed files with 3104 additions and 1 deletions

View File

@ -1,2 +1,13 @@
# popnhax_tools
Scripts related to popnhax
Scripts related to popnhax and pop'n omnimix.
Click on a folder for more info.
## pms2bemani
This allows you to convert .pms files into a format suited for popnhax custom packs.
## omnimix
This is a collection of various omnimix related scripts and documentation.

76
omnimix/README.md Normal file
View File

@ -0,0 +1,76 @@
# Omnimix documentation
Refer to [omnimix_db.md](omnimix_db.md)
# Omnimix tools
## ida_find_addrs.py
IDA script tested in 6.6 and 7.x.
Creates a map file based on the opened DLL file.
The output file is not guaranteed to work but it should be about 95% right.
If the game crashes when you use a newly generated XML file, diff with a known good/working XML file to figure out what patches don't look right and remove them.
Usage:
1. Load popn22.dll in IDA Pro
2. Wait until IDA finishes analyzing the entire DLL
3. File > Script file... > select ida_find_addrs.py
4. Copy output XML file from IDA's output window (by default it will be docked to the bottom of the screen)
## db_dump.py
Dump the full database information from the specified DLL using the input XML mapping information.
You can obtain the XML maps by using ida_find_addrs.py.
Usage:
```bash
> python3 db_dump.py --help
usage: db_dump.py [-h] --input-dll INPUT_DLL --input-xml INPUT_XML
[--output OUTPUT]
optional arguments:
-h, --help show this help message and exit
--input-dll INPUT_DLL
Input DLL file
--input-xml INPUT_XML
Input XML file
--output OUTPUT Output folder
```
Example: `python3 db_dump.py --input-dll popn22.dll --input-xml db/patches_2018082100.xml --output 2018082100`
## verify_data.py
Verify the integrity of the game's data. This checks to make sure that all of the expected chart files, previews, and certain images are as expected.
Chart data itself is verified using various criteria for what I felt a "standard" chart would be.
Not all charts, including official charts, meet this criteria but still work in-game.
WARNING: This tool is slow because it checks all song-related IFS files, including verifying all of the charts as much as possible.
Usage:
```bash
> python3 verify_data.py --help
usage: verify_data.py [-h] --input-dll INPUT_DLL --input-xml INPUT_XML
--input-data INPUT_DATA [--input-db INPUT_DB]
optional arguments:
-h, --help show this help message and exit
--input-dll INPUT_DLL
Input DLL file
--input-xml INPUT_XML
Input XML file
--input-data INPUT_DATA
Input data folder
--input-db INPUT_DB Input db folder
```
Example: `python3 verify_data.py --input-dll popn22.dll --input-xml db/patches_2018082100.xml --input-data data --input-db db`
# Other Important Notes
- ~~As of time of writing, the latest version of ifstools (1.14) will not extract jacket.ifs properly on Windows due to NTFS's case-insensitivity, resulting in 3 images being overwritten with data that won't work in-game. You can extract on a *nix system to get the correct jacket images if you see a green block in place of the jackets for the affected songs.~~
- Not pushed out to pypi yet, but this has already been fixed in master and will be included in the next release of ifstools where you can use the `--rename-dupes` flag (thanks mon!).
- Character database editing is slightly restrictive at the moment due to not being able to add new entries to the flavor table. When trying to add new entries to the flavor table there is a high chance of the game crashing in my experience. My guess is that there are some more places that should be patched that I have not found yet. This is a technical issue that could be solved with more work but it's more of a stretch goal than a main goal for this project so I put in a bandaid to make sure that the flavor table never expands. This issue is also why some unlocked characters will turn into Nyami alts.

34
omnimix/db_dump.py Normal file
View File

@ -0,0 +1,34 @@
import argparse
import os
from lxml.etree import tostring
from lxml.builder import E
import popndll
def save_databases(databases, output_base_folder):
    """Write the parsed chara/music databases out as chunked XML files.

    databases: dict with 'charadb' and 'musicdb' entry lists (as produced by
        popndll.parse_database_from_dll).
    output_base_folder: destination folder, created if missing.

    Entries are split into chunks of 500 per file, named charadb_0.xml,
    charadb_1.xml, ..., musicdb_0.xml, ...
    """
    os.makedirs(output_base_folder, exist_ok=True)
    for data, elm_name, output_basename, chunk_size in [(databases['charadb'], "chara", "charadb", 500), (databases['musicdb'], "music", "musicdb", 500)]:
        chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
        for idx, chunk in enumerate(chunks):
            xml = E.database(
                *[E(elm_name, *popndll.serialize_data(x), id=str(x['_id'])) for x in chunk]
            )
            output_filename = os.path.join(output_base_folder, "%s_%d.xml" % (output_basename, idx))
            # Serialize as cp932 and rewrite the declaration to "shift-jis" —
            # presumably because the serializer lacks a shift-jis alias; the
            # two names refer to the same codec family. TODO confirm.
            payload = tostring(xml, pretty_print=True, method='xml', encoding='cp932', xml_declaration=True).replace(b"cp932", b"shift-jis")
            # Fix: use a context manager instead of open(...).write(...) so the
            # file handle is closed deterministically.
            with open(output_filename, "wb") as output_file:
                output_file.write(payload)
if __name__ == "__main__":
    # CLI entry point: parse the databases out of the DLL, then dump them.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input-dll', help='Input DLL file', default=None, required=True)
    arg_parser.add_argument('--input-xml', help='Input XML file', default=None, required=True)
    arg_parser.add_argument('--output', help='Output folder', default="output")
    cli_args = arg_parser.parse_args()
    parsed_databases = popndll.parse_database_from_dll(cli_args.input_dll, cli_args.input_xml)
    save_databases(parsed_databases, cli_args.output)

324
omnimix/ida_find_addrs.py Normal file
View File

@ -0,0 +1,324 @@
import idc
import idaapi
import idautils
import struct
# Table indices shared by every per-table structure in this script
# (buffer_addrs, limit_info_list, TARGETS, patch lists).
MUSIC_IDX = 0
CHART_IDX = 1
STYLE_IDX = 2
FLAVOR_IDX = 3
CHARA_IDX = 4
# Patch addresses that must never be emitted, even when the byte search
# matches them.
OFFSET_BLACKLIST = [
    # These offsets are known bads
    0x100be154,
    0x100be346,
    0x100bed91,
    0x100fa4e2,
]
def find_binary(search, search_head, search_idx):
    """Locate the search_idx-th occurrence of a byte pattern.

    search: IDA byte-pattern string (e.g. "83 C4 04 ...", "??" wildcards ok).
    search_head: offset added to each hit before it is counted/returned.
    search_idx: zero-based occurrence to return.

    Returns the adjusted address, or None when there are not enough matches.
    """
    addr = 0
    occurrence = -1
    while True:
        addr = idc.FindBinary(addr + 1, SEARCH_DOWN | SEARCH_NEXT, search)
        if addr == idc.BADADDR:
            return None
        occurrence += 1
        # Note: the head offset is applied before resuming the next search,
        # exactly as the scan has always behaved.
        addr = addr + search_head
        if occurrence == search_idx:
            return addr
def find_binary_xref(search, search_head, search_idx, xref_search_idx):
    """Locate a byte pattern, then return the xref_search_idx-th code xref to it.

    Same pattern semantics as find_binary; returns the referencing address
    (xref.frm), or None when no matching pattern/xref combination exists.
    """
    addr = 0
    occurrence = -1
    while True:
        addr = idc.FindBinary(addr + 1, SEARCH_DOWN | SEARCH_NEXT, search)
        if addr == idc.BADADDR:
            return None
        occurrence += 1
        addr = addr + search_head
        if occurrence != search_idx:
            continue
        # Walk the cross-references to the matched address and pick the
        # requested one; fall through to keep searching if there are too few.
        for current_idx, xref in enumerate(idautils.XrefsTo(addr)):
            if current_idx == xref_search_idx:
                return xref.frm
def get_table_size_by_xref(ea, entry_size):
    """Count the entries in a fixed-stride table starting at *ea*.

    Advances one entry at a time until it reaches an address that has a
    cross-reference — taken to be the first thing past the table — and
    returns the number of entries scanned.
    """
    table_start = ea
    # Skip 10 entries because why not. We're looking for the end anyway
    cursor = ea + entry_size * 10
    while not any(True for _ in idautils.XrefsTo(cursor)):
        cursor += entry_size
    return (cursor - table_start) // entry_size
def find_weird_update_patches():
    """Harvest extra music-limit patch entries from code around a known
    compare sequence ("83 C4 04 3B C5 74 09").

    Returns a list of [MUSIC_IDX, method, operand value, address] entries:
    up to three LEA operands (methods 9/10/11, in discovery order reversed)
    plus the PUSH immediate preceding the anchor (method 11).
    Exits the process if the CALL after the anchor cannot be found.
    NOTE(review): if the anchor pattern is absent, find_binary returns None
    and the arithmetic below will raise — confirm that is acceptable here.
    """
    ea = find_binary("83 C4 04 3B C5 74 09", 0, 0)
    orig_ea = ea
    values = []
    # Find previous PUSH
    # (walk backwards at most 0x1000 bytes from the anchor)
    while orig_ea - ea < 0x1000:
        if idc.GetMnem(ea) == "push":
            values.append([MUSIC_IDX, 11, idc.GetOperandValue(ea, 0), ea])
            break
        ea = idc.PrevHead(ea)
    # Find next CALL
    ea = orig_ea
    call_ea = None
    while ea - orig_ea < 0x1000:
        if idc.GetMnem(ea) == "call":
            call_ea = idc.GetOperandValue(ea, 0)
            break
        ea = idc.NextHead(ea)
    if call_ea is None:
        print("Couldn't find call, can't finish")
        exit(1)
    # Scan the called function backwards from its end, collecting the last
    # three "lea reg, [ebx+...]" operands.
    ea = idc.FindFuncEnd(call_ea)
    lea_values = []
    lea_orders = [11, 10, 9]
    while ea >= call_ea:
        if idc.GetMnem(ea) == "lea" and idc.GetOpnd(ea, 1).startswith('[ebx+'):
            lea_values.append([MUSIC_IDX, lea_orders[len(lea_values)], idc.GetOperandValue(ea, 1), ea])
            # It is probably possible to pull a lot more from here
            if len(lea_values) == 3:
                break
        ea = idc.PrevHead(ea)
    return lea_values[::-1] + values
# These all reference the first entry in their respective tables
# (the byte patterns are data-section literals from the DLL; e.g. the chart
# pattern is ASCII "popn1" and the chara pattern is "bamb_1a" — the music and
# flavor patterns are presumably cp932 text, confirm against the DLL).
music_table_addr = find_binary_xref("00 83 7C 83 62 83 76 83 58 00", 1, 0, 0)
chart_table_addr = find_binary_xref("00 70 6F 70 6E 31 00 00", 1, 0, 1)
style_table_addr = find_binary("01 00 00 00 FF 54 0C 00 1A 00 00 00 11 00 00 00", 0, 2)
flavor_table_addr = find_binary("00 82 BB 82 EA 82 A2 82 AF 81 5B 00 00 00 82 A4 82", 1, 0)
chara_table_addr = find_binary_xref("00 62 61 6D 62 5F 31 61 00", 1, 0, 0)
# Modify the entry sizes as required
buffer_addrs = [
    # entry type, table address, entry size
    [MUSIC_IDX, music_table_addr, 0xac],
    [CHART_IDX, chart_table_addr, 0x20], # Probably won't change?
    [STYLE_IDX, style_table_addr, 0x10], # Unlikely to change
    [FLAVOR_IDX, flavor_table_addr, 0x60],
    [CHARA_IDX, chara_table_addr, 0x4C],
]
# Per-table entry counts, found by scanning forward from each table start
# until the next cross-referenced address (see get_table_size_by_xref).
limit_info_list = [
    # buffer_addr + (buffer_entry_size * limit) should give you the very end of the array (after the last entry)
    [MUSIC_IDX, get_table_size_by_xref(*buffer_addrs[MUSIC_IDX][1:])],
    [CHART_IDX, get_table_size_by_xref(*buffer_addrs[CHART_IDX][1:])],
    [STYLE_IDX, get_table_size_by_xref(*buffer_addrs[STYLE_IDX][1:])],
    [FLAVOR_IDX, get_table_size_by_xref(*buffer_addrs[FLAVOR_IDX][1:])],
    [CHARA_IDX, get_table_size_by_xref(*buffer_addrs[CHARA_IDX][1:])],
]
# Immediates to hunt for in code and patch. Each entry is
# [table index, patch method, immediate value]. The "(1780 - limit)" terms
# rescale hard-coded constants by the difference from 1780 — presumably the
# stock Usaneko music table size (TODO confirm).
update_patches = [
    [MUSIC_IDX, 0, limit_info_list[MUSIC_IDX][1] - 1],
    [MUSIC_IDX, 0, limit_info_list[MUSIC_IDX][1]],
    [CHART_IDX, 0, limit_info_list[CHART_IDX][1]],
    [CHART_IDX, 0, limit_info_list[CHART_IDX][1] - 1],
    [CHARA_IDX, 0, limit_info_list[CHARA_IDX][1]],
    [FLAVOR_IDX, 0, limit_info_list[FLAVOR_IDX][1] - 1],
    [FLAVOR_IDX, 0, limit_info_list[FLAVOR_IDX][1]],
    # These values may change in a future patch, but they worked for Usaneko and Peace for now.
    # These could possibly be done using something similar to the find_weird_update_patches code.
    [MUSIC_IDX, 1, 0x1BD0 - (1780 - limit_info_list[MUSIC_IDX][1]) * 4],
    [MUSIC_IDX, 1, 0x1Bcf - (1780 - limit_info_list[MUSIC_IDX][1]) * 4],
    [MUSIC_IDX, 2, 0xA6E0 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x18],
    [MUSIC_IDX, 3, 0x29B7 - (1780 - limit_info_list[MUSIC_IDX][1]) * 6],
    [MUSIC_IDX, 4, 0x3E944 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x90],
    [MUSIC_IDX, 4, 0x3E948 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x90],
    [MUSIC_IDX, 5, 0x1F4F4 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x48],
    [MUSIC_IDX, 5, 0x1F4C0 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x48],
    [MUSIC_IDX, 5, 0x1F4F0 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x48],
    [MUSIC_IDX, 6, 0x7D3D8 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x120],
    [MUSIC_IDX, 6, 0x7D3D4 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x120],
    [MUSIC_IDX, 7, 0x1D8E58 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x440],
    [MUSIC_IDX, 7, 0x1D9188 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x440],
    [MUSIC_IDX, 8, 0x5370 - (1780 - limit_info_list[MUSIC_IDX][1]) * 0x0c],
    [FLAVOR_IDX, 8, limit_info_list[FLAVOR_IDX][1] * 0x0c],
    [FLAVOR_IDX, 8, limit_info_list[FLAVOR_IDX][1] * 0x0c + 4],
]
# Extra music-limit patches located heuristically around a known compare
# sequence (see find_weird_update_patches); entries carry their own address.
update_patches_weird = find_weird_update_patches()
# [hook method, address] pairs located by byte signature; either find may
# return None, which is skipped at emission time.
hook_addrs = [
    [0, find_binary("8B C6 E8 ?? ?? ?? ?? 83 F8 ?? 7D ?? 56 8A C3 E8 ?? ?? ?? ?? 83 C4 04 3D ?? ?? ?? ?? 7D ??", 0, 0)],
    [1, find_binary("83 F8 ?? 0F 9C C0 E8", 0, 0)],
]
# Table index -> XML tag name used in the emitted patch file.
TARGETS = {
    MUSIC_IDX: 'music',
    CHART_IDX: 'chart',
    STYLE_IDX: 'style',
    FLAVOR_IDX: 'flavor',
    CHARA_IDX: 'chara'
}
# Emit the generated patch XML on stdout (IDA's output window); the user
# copies it into a popnhax db/patches_*.xml file.
print("<?xml version='1.0' encoding='shift-jis'?>")
print("<patches>")
print("\t<limits>")
for limit_info in limit_info_list:
    patch_target, limit_value = limit_info
    print('\t\t<%s __type="u32">%d</%s>' % (TARGETS[patch_target], limit_value, TARGETS[patch_target]))
print("\t</limits>")
print("\t<buffer_base_addrs>")
for buffer_info in buffer_addrs:
    patch_target, buffer_addr, entry_size = buffer_info
    print('\t\t<%s __type="str">0x%x</%s>' % (TARGETS[patch_target], buffer_addr, TARGETS[patch_target]))
print("\t</buffer_base_addrs>")
print("\t<buffers_patch_addrs>")
# For every address within the first entry of each table, emit each code
# location whose instruction bytes embed that address literally.
for buffer_info in buffer_addrs:
    patch_target, search_value_base, entry_size = buffer_info
    for search_value in range(search_value_base, search_value_base + entry_size + 1):
        raw_search_value = bytearray(struct.pack("<I", search_value))
        for xref in idautils.XrefsTo(search_value):
            ea = xref.frm
            # Find exact bytes to be patched
            raw_bytes = bytearray([idc.Byte(ea + i) for i in range(0x10)])
            if raw_search_value not in raw_bytes:
                # Fix: the original literal contained "Couldn\t" (an embedded tab).
                print('\t\t<!-- Couldn\'t find raw bytes: ' + idc.GetDisasm(ea) + ' -->')
                continue
            if ea + raw_bytes.index(raw_search_value) in OFFSET_BLACKLIST:
                continue
            print('\t\t<!-- ' + idc.GetDisasm(ea) + ' -->')
            print('\t\t<%s __type="str">0x%x</%s>' % (TARGETS[patch_target], ea + raw_bytes.index(raw_search_value), TARGETS[patch_target]))
# Fix: bare `print ""` is Python 2 only; the README claims IDA 7.x (Python 3)
# support, and print("") behaves identically on both.
print("")
# This is a hack for Usaneko.
# Usaneko's code is dumb.
# If it doesn't find *this* address it won't stop the loop.
# (the pattern is the cp932 string for the "random Lv 7" course entry —
# NOTE(review): decoding inferred from the hex, confirm against the DLL)
random_lv7 = find_binary_xref("83 89 83 93 83 5F 83 80 20 4C 76 20 37 00 00 00", 0, 0, 0)
random_lv7_xrefs = idautils.XrefsTo(random_lv7) if random_lv7 is not None else []
# The packed address is loop-invariant; compute it once.
raw_search_value = bytearray(struct.pack("<I", random_lv7)) if random_lv7 is not None else None
for x in random_lv7_xrefs:
    ea = x.frm
    raw_bytes = bytearray([idc.Byte(ea + i) for i in range(0x10)])
    # Fix: guard against the literal not appearing in the instruction bytes,
    # matching the other emit loops in this script; without this,
    # raw_bytes.index() raises ValueError and aborts the whole dump.
    if raw_search_value not in raw_bytes:
        continue
    if ea + raw_bytes.index(raw_search_value) in OFFSET_BLACKLIST:
        continue
    print('\t\t<!-- ' + idc.GetDisasm(ea) + ' -->')
    print('\t\t<%s __type="str">0x%x</%s>' % (TARGETS[MUSIC_IDX], ea + raw_bytes.index(raw_search_value), TARGETS[MUSIC_IDX]))
print("\t</buffers_patch_addrs>")
print("\t<other_patches>")
# Scan all code for each hard-coded immediate and emit its patch location,
# skipping data definitions and known-bad matches.
for patch_info in update_patches:
    patch_target, patch_type, search_value = patch_info
    raw_search_value = bytearray(struct.pack("<I", search_value))
    ea = 0
    while ea != idc.BADADDR:
        (ea, n) = idc.FindImmediate(ea, idc.SEARCH_DOWN, search_value)
        if ea != idc.BADADDR:
            # The immediate must appear verbatim in the disassembly text.
            if "%X" % search_value not in idc.GetDisasm(ea):
                continue
            if idc.GetMnem(ea) == "dd" or idc.GetDisasm(ea).strip().startswith("dd "):
                # Skip non-code bits
                continue
            if "NumberOfBytesWritten" in idc.GetDisasm(ea):
                # Skip known bad parts
                continue
            # Find exact bytes to be patched
            raw_bytes = bytearray([idc.Byte(ea + i) for i in range(0x10)])
            if raw_search_value not in raw_bytes:
                continue
            if ea + raw_bytes.index(raw_search_value) in OFFSET_BLACKLIST:
                continue
            print('\t\t<!-- ' + idc.GetDisasm(ea) + ' -->')
            print('\t\t<%s __type="str" method="%d" expected="0x%x">0x%x</%s>' % (TARGETS[patch_target], patch_type, search_value, ea + raw_bytes.index(raw_search_value), TARGETS[patch_target]))
# Fix: `print ""` is Python 2 only; print("") works on both 2 and 3.
print("")
# Emit the heuristically-found patches; these already carry their address.
for patch_info in update_patches_weird:
    patch_target, patch_type, search_value, ea = patch_info
    raw_search_value = bytearray(struct.pack("<I", search_value))
    # Find exact bytes to be patched
    raw_bytes = bytearray([idc.Byte(ea + i) for i in range(0x10)])
    if raw_search_value not in raw_bytes:
        # Fix: the original literal contained "Couldn\t" (an embedded tab).
        print('\t\t<!-- Couldn\'t find raw bytes: ' + idc.GetDisasm(ea) + ' -->')
        continue
    if ea + raw_bytes.index(raw_search_value) in OFFSET_BLACKLIST:
        continue
    print('\t\t<!-- ' + idc.GetDisasm(ea) + ' -->')
    print('\t\t<%s __type="str" method="%d" expected="0x%x">0x%x</%s>' % (TARGETS[patch_target], patch_type, search_value, ea + raw_bytes.index(raw_search_value), TARGETS[patch_target]))
# Fix: `print ""` is Python 2 only; print("") works on both 2 and 3.
print("")
print("\t</other_patches>")
print("\t<hook_addrs>")
for hook_info in hook_addrs:
    hook_type, offset = hook_info
    # Signature searches above may have failed; skip missing hooks.
    if offset is None:
        continue
    if hook_type == 1:
        # Method-1 hooks point two instructions past the matched signature.
        offset = idc.NextHead(offset)
        offset = idc.NextHead(offset)
    print('\t\t<!-- ' + idc.GetDisasm(offset) + ' -->')
    print('\t\t<offset __type="str" method="%d">0x%x</offset>' % (hook_type, offset))
# Fix: `print ""` is Python 2 only; print("") works on both 2 and 3.
print("")
print("\t</hook_addrs>")
print("</patches>")

13
omnimix/legacy/README.md Normal file
View File

@ -0,0 +1,13 @@
# Legacy scripts
These scripts are deprecated and no longer maintained; they are kept for archival purposes only.
## convert_omni
This tool converts the v1 omnimix_data_install folder into a proper data_mods/omnimix song pack.
Usage:
1. Place `omnimix_data_install` and `tools` folder from the omnimix v1 archive in the same folder as the script.
2. Run `convert_omnimix.bat`
The generated `data_mods` folder is now suitable to use with omnimix v2, copy it to your pop'n music "contents" folder along with the omnimix_v2 archive contents.

View File

@ -0,0 +1,10 @@
@echo off
rem Run the v1 -> data_mods converter with the Python bundled in the v1 archive.
rem Fix: bare "pushd" only prints the directory stack; push the current
rem directory explicitly so the "popd" below actually restores it.
pushd .
cd /d %~dp0
omnimix_data_install\\python\\python.exe convert_omnimix.py
popd
pause

View File

@ -0,0 +1,144 @@
import glob
import hashlib
import os
import shutil
import sys
import subprocess
import ifstools
from lxml.etree import tostring, fromstring, XMLParser, parse as etree_parse
from lxml.builder import E
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy *src* into *dst*, merging into existing directories
    and overwriting existing files (unlike stock shutil.copytree, which
    refuses to copy into an existing destination).

    Adapted from https://stackoverflow.com/a/13814557
    """
    os.makedirs(dst, exist_ok=True)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if not os.path.isdir(src_path):
            print("Copying %s to %s..." % (src_path, dst_path))
            # Remove any pre-existing file so copy2 can't fail on it.
            if os.path.exists(dst_path):
                os.unlink(dst_path)
            shutil.copy2(src_path, dst_path)
        else:
            copytree(src_path, dst_path, symlinks, ignore)
def get_unique_files(path, unique_files):
    """Recursively collect file paths under *path* whose basename is not
    already present in *unique_files* (first file seen with a basename wins).

    Mutates and returns *unique_files*.
    """
    for entry in glob.glob(os.path.join(path, "*")):
        if os.path.isdir(entry):
            # Fix: recurse into the subdirectory and keep scanning the
            # remaining siblings. The original `return`ed here, silently
            # dropping every entry after the first subdirectory encountered.
            get_unique_files(entry, unique_files)
            continue
        basename = os.path.basename(entry)
        if all(os.path.basename(known) != basename for known in unique_files):
            unique_files.append(entry)
    return unique_files
# Locations of the v1 archive's "data_patch" overlay folders.
omnimix_new_patch_path = os.path.join("omnimix_data_install", "omnimix_new", "data_patch")
omnimix_old_patch_path = os.path.join("omnimix_data_install", "omnimix_old", "data_patch")
# Check that the required data is available
# NOTE(review): assert is stripped under `python -O`; explicit checks that
# raise would be safer, but this is a deprecated archival script.
assert(os.path.exists("omnimix_data_install") == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_old")) == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_old", "data")) == True)
assert(os.path.exists(omnimix_old_patch_path) == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_old", "db")) == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_new")) == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_new", "data")) == True)
assert(os.path.exists(omnimix_new_patch_path) == True)
assert(os.path.exists(os.path.join("omnimix_data_install", "omnimix_new", "db")) == True)
# Copy full data folders
for folder in ["omnimix_old", "omnimix_new"]:
    path = os.path.join("omnimix_data_install", folder, "data")
    if os.path.exists(path):
        copytree(path, os.path.join("data_mods", "omnimix"))
# Copy and rename _mod files
# Each tuple: (.ifs path in stock data, v1 source folder name, target base
# name, whether the target archive needs a tex/ subfolder).
data_sets = [
    (os.path.join("data", "tex", "system", "icon_diff.ifs"), "icon_mod", "icon_diff", True),
    (os.path.join("data", "tex", "system", "chara_name_diff.ifs"), "chara_name_mod", "chara_name_diff", True),
    (os.path.join("data", "tex", "system", "chara_name_new_diff.ifs"), "chara_name_new_mod", "chara_name_new_diff", False),
    (os.path.join("data", "tex", "system", "kc_diff.ifs"), "kc_mod", "kc_diff", False),
    (os.path.join("data", "tex", "system", "bg_diff.ifs"), "bg_mod", "bg_diff", False),
    (os.path.join("data", "tex", "system", "ha_merge.ifs"), "ha_mod", "ha_merge", False),
    (os.path.join("data", "tex", "system22", "charapop_diff.ifs"), "charapop_mod", "charapop_diff", False),
]
# NOTE(review): xml_patch_values appears unused in the visible portion of
# this script — confirm before removing.
xml_patch_values = {x[2]: 0 for x in data_sets}
for data_set in data_sets:
    ifs_path, source, target, is_tex_archive = data_set
    print("Processing %s..." % ifs_path)
    # NOTE(review): tmp_path is only referenced by the disabled cleanup below.
    tmp_path = os.path.join("tmp", target)
    mod_ifs_path = os.path.join(os.path.dirname(ifs_path), "%s_ifs" % target)
    # Rewrites the leading "data" path segment to data_mods/omnimix;
    # str.replace hits the first occurrence, which is the path root here.
    mod_ifs_path = mod_ifs_path.replace("data", os.path.join("data_mods", "omnimix"))
    os.makedirs(mod_ifs_path, exist_ok=True)
    print("Created ", mod_ifs_path)
    if is_tex_archive:
        os.makedirs(os.path.join(mod_ifs_path, "tex"), exist_ok=True)
    # NOTE(review): `path` is left over from the "Copy full data folders"
    # loop above (always .../omnimix_new/data), so this re-copies that folder
    # on every iteration — looks like a copy/paste remnant; confirm intent.
    if os.path.exists(path):
        copytree(path, os.path.join("data_mods", "omnimix"))
    # Gather files from both overlays; "new" is scanned first, so its files
    # win any basename clash.
    unique_files = []
    unique_files = get_unique_files(os.path.join(omnimix_new_patch_path, source), unique_files)
    unique_files = get_unique_files(os.path.join(omnimix_old_patch_path, source), unique_files)
    # Copy data
    for filename in unique_files:
        target_path = os.path.join(mod_ifs_path, "tex" if is_tex_archive else "", os.path.basename(filename.lower()))
        print("Copying %s to %s" % (filename, target_path))
        # if os.path.exists(target_path):
        # os.unlink(target_path)
        shutil.copy2(filename, target_path)
    #DEBUG shutil.rmtree(tmp_path)
# Copy db files
output_db_path = os.path.join("data_mods", "omnimix")
db_paths = [
    os.path.join("omnimix_data_install", "omnimix_old", "db"),
    os.path.join("omnimix_data_install", "omnimix_new", "db"),
]
db_filenames = []
for path in db_paths:
    if not os.path.exists(path):
        continue
    for filename in sorted(glob.glob(os.path.join(path, "*.xml"))):
        if filename not in db_filenames:
            db_filenames.append(filename)
    copytree(path, output_db_path)
# Cleanup tmp folder since it's no longer needed
#shutil.rmtree("tmp")
print("Done!")

253
omnimix/omnimix_db.md Normal file
View File

@ -0,0 +1,253 @@
# Omnimix databases documentation
## database load order
All database files that are to be loaded must reside in data_mods/your_mod_name/
They will be loaded in alphabetical order so name them with respect to the order you want them to be loaded. Also be mindful of your own mod folder name.
Example:
/data_mods
/crm_custom_1
/custom_musicdb_crm1.xml
/custom_musicdb_zzz.xml
/milo_custom_2
/custom_musicdb_milo2.xml
custom_musicdb_crm1.xml and custom_musicdb_zzz.xml will be loaded first (in this order), because of the folder name.
NOTE: popnhax will load all character data from all mod folders before loading any music data, regardless of the order.
The same `id` can be listed in multiple files. For example, music ID 100 can be in both `omnimix_musicdb_0.xml` and `omnimix_musicdb_1.xml`. In that case, the data specified in the file loaded later will overwrite the previously loaded data.
Both the music database and character database XML files allow for partial loading. That is, you can specify only the data you wish to modify instead of copying all of the information associated with that entry. This is useful, for example, if you want to change a song's title, difficulty, add a single chart, etc.
For example, this is a valid database file that will patch only the specified data:
```xml
<?xml version='1.0' encoding='shift-jis'?>
<database>
<music idx="1">
<title __type="str">New Title</title>
</music>
<music idx="2">
<genre __type="str">New Genre</genre>
</music>
<music idx="3">
<charts>
<chart idx="hp">
<diff __type="u8">12</diff>
</chart>
</charts>
</music>
<chara idx="100">
<disp_name __type="str">New Chara Name</disp_name>
</chara>
</database>
```
## Music database format
```xml
<music id="3000">
<fw_genre __type="str">ニューカマー</fw_genre>
<fw_title __type="str"></fw_title>
<fw_artist __type="str">サトウチアキ</fw_artist>
<genre __type="str">ニューカマー</genre>
<title __type="str">un-Balance</title>
<artist __type="str">佐藤千晶</artist>
<chara1 __type="str">kate_3a</chara1>
<chara2 __type="str">kate_3b</chara2>
<mask __type="u32">32</mask>
<folder __type="u32">3</folder>
<cs_version __type="u32">0</cs_version>
<categories __type="u32">0</categories>
<charts>
<chart idx="np">
<folder __type="str">omni_cs</folder>
<filename __type="str">ac3_newcommer_0</filename>
<audio_param1 __type="s32">50</audio_param1>
<audio_param2 __type="s32">50</audio_param2>
<audio_param3 __type="s32">0</audio_param3>
<audio_param4 __type="s32">0</audio_param4>
<file_type __type="u32">0</file_type>
<used_keys __type="u16">0</used_keys>
<diff __type="u8">24</diff>
<hold_flag __type="u8">0</hold_flag>
</chart>
</charts>
<ha __type="str"></ha>
<chara_x __type="u32">0</chara_x>
<chara_y __type="u32">0</chara_y>
<unk1 __type="u16" __count="32">0 0 0 0 0 0 36 0 0 59 77 0 0 0 0 134 0 0 68 67 222 0 0 0 0 0 0 0 0 0 0 0</unk1>
<display_bpm __type="u16" __count="12">160 0 0 0 0 0 0 0 0 0 0 0</display_bpm>
</music>
```
`music` is as follows:
- `idx` is the music ID.
- `genre`/`title`/`artist` are self explanatory.
- `fw_genre`/`fw_title`/`fw_artist` are the Japanese full width versions of - `genre`/`title`/`artist`. Generally the alphanumeric characters are uppercased for official songs but the game is not strict.
These fields are used to sort, therefore DO NOT USE HIRAGANA OR KANJI else your entry will not appear at the correct position in the songlist.
- `chara1`/`chara2` correspond to the `chara_id` in the character database XML.
- `folder` is the game version number.
- `cs_version` is the game version number for CS versioning.
- `categories` is a bitfield for categories
- 0x0001: beatmania
- 0x0002: IIDX
- 0x0004: DDR
- 0x0008: Gitadora
- 0x0010: Mambo a Go Go
- 0x0020: pop'n stage
- 0x0040: Keyboardmania
- 0x0080: Dance Maniax
- 0x0100: bmIII
- 0x0200: Toy's March
- 0x0400: ee'mall (only ee'mall originals have this set)
- 0x0800: jubeat
- 0x1000: Reflec Beat
- 0x2000: SDVX
- 0x4000: BeatStream
- 0x8000: Nostalgia
- `ha` is the hariai image. For example, songs that display jackets use the hariai image with a specific bit in the `mask` set to display the jacket on the music select screen.
- `chara_x` and `chara_y` refers to the position of the character's face in the portrait. It's used to position the speech bubble and centering the image during the popout animation in the options screen.
- `unk1` is unknown data.
- `display_bpm` is an array of twelve values, consisting of the low ends of the bpm ranges for each chart followed by the highest ends. If both ends are the same, a constant bpm is displayed. If both values are negative, a question mark is displayed instead of the high value (e.g. Simonman songs). The popnhax parser won't take negative values, but the unsigned representations (=65535) in decimal work.
- `mask` is a bitfield that covers a lot of things
- 0x00000008: Display a fake BPM range at the options screen, as defined by 'display_bpm'
- 0x00000020: The alternate hariai image (set by using 0x800000) is a song jacket instead of a character portrait
- 0x00000080: Seems to be related to locking away songs
- 0x00010000: TV/J-Pop category flag
- 0x00080000: Easy chart flag
- 0x00800000: Required for songs that show a hariai image on the music selection screen
- 0x01000000: Hyper chart flag
- 0x02000000: Ex chart flag
- 0x04000000: Battle hyper chart flag
- 0x08000000: Seems to be related to locking away songs
- 0x80000000: Default for placeholder songs, so it's probably used to skip those songs
- Anything else is undocumented here
`chart` is as follows:
- `idx` is the labeling for the difficulty: ep (Easy), np (Normal), hp (Hyper), op (Ex), bp_n (Battle Normal), bp_h (Battle Hyper).
- `folder` is the folder in `data/sd/` where the file can be found.
- `filename` is the base filename within `data/sd/<folder>`.
- `audio_param1`/`audio_param2`/`audio_param3`/`audio_param4` are something to do with the audio parameters. I believe `audio_param1` is BGM volume and `audio_param2` is keysound volume, or something like that. I haven't looked into this too much so consider it undocumented.
- `diff` is the difficulty of the chart. If set to 0 then the chart will be unselectable in-game (useful for normal and battle normal which are assumed to always be available by the game). The size of `diff` is 8 bits so its theoretical range is 0-255.
- `hold_flag` is whether the song should display as having hold notes or not on the music selection screen.
- `force_new_chart_format` forces popnhax to tell the game that the specified chart is in the new format (12 byte entries, allows for hold notes) instead of the old format (8 byte entries).
- `used_keys` is a bitfield that tells the game what notes were used in a chart. Only displayed when looking at easy charts on the music selection screen. You should see a pop'n music controller with buttons highlighted in the corner when selecting easy charts.
- `file_type` has two distinct usages. If `file_type` is <= 5 then it will look for `<filename>_<file_type>.ifs`. If `file_type` is > 5 then it'll look for `<filename>_diff.ifs`.
## Character database format
```xml
<chara id="1500">
<chara_id __type="str">wac_18b</chara_id>
<flags __type="u32">0</flags>
<folder __type="str">18</folder>
<gg __type="str">gg_wac_18b</gg>
<cs __type="str">cs_wac_18b</cs>
<icon1 __type="str">cs_wac_18a</icon1>
<icon2 __type="str">cs_wac_18b</icon2>
<chara_xw __type="u16">128</chara_xw>
<chara_yh __type="u16">75</chara_yh>
<display_flags __type="u32">256</display_flags>
<flavor>
<phrase1 __type="str">ガジガジ</phrase1>
<phrase2 __type="str">キューキュー</phrase2>
<phrase3 __type="str">ニコニコ♪</phrase3>
<phrase4 __type="str">トボトボ…</phrase4>
<phrase5 __type="str">ウキャウキャ</phrase5>
<phrase6 __type="str">グルグル</phrase6>
<birthday __type="str">コノマエ</birthday>
<chara1_birth_month __type="u8">1</chara1_birth_month>
<chara2_birth_month __type="u8">0</chara2_birth_month>
<chara3_birth_month __type="u8">0</chara3_birth_month>
<chara1_birth_date __type="u8">1</chara1_birth_date>
<chara2_birth_date __type="u8">0</chara2_birth_date>
<chara3_birth_date __type="u8">0</chara3_birth_date>
<style1 __type="u16">1</style1>
<style2>
<fontface __type="u32">5</fontface>
<color __type="u32">6204672</color>
<height __type="u32">28</height>
<width __type="u32">19</width>
</style2>
<style3 __type="u16">28</style3>
</flavor>
<chara_variation_num __type="u8">1</chara_variation_num>
<sort_name __type="str">ヒトリ</sort_name>
<disp_name __type="str">ヒトリ</disp_name>
<file_type __type="u32">0</file_type>
<lapis_shape __type="str"></lapis_shape>
<lapis_color __type="str"></lapis_color>
<ha __type="str">ha_wac_18b</ha>
<catchtext __type="str"></catchtext>
<win2_trigger __type="s16">0</win2_trigger>
<game_version __type="u32">18</game_version>
</chara>
```
`chara` is as follows:
- `idx` is the character ID.
- `chara_id` is the base name of the character IFS.
- `flags` is a bitfield
- 0x001: Character is dummied out
- 0x002: Not playable
- 0x004: Appears in the CS category
- 0x008: Appears in the TV&Anime category
- 0x010: Must be unlocked by unlocking at least one of their songs
- 0x020: Can't use deco parts. Only used by Funassyi and doesn't even work after eclale.
- 0x040: Must be unlocked by playing a round with the previous variation (unlocking P2 colors)
- 0x080: Not sure, but seems to be used with the alternate portraits that were unlockable in Lapistoria
- 0x200: Special color category, which was removed after Lapistoria
- 0x400: Is from another BEMANI game, and thus appears in the BEMANI & GAMES category
- 0x800: Is from a non-BEMANI Konami game, and thus appears in the BEMANI & GAMES category
- `folder` is the folder in `sd/tex/` where the character data is located.
- `gg`/`cs`/`icon1`/`icon2` are images associated with the character.
- `chara_xw `/`chara_yh` refers to the position of the character's face in the portrait. It's used to position the speech bubble and centering the image during the popout animation in the options screen.
- `display_flags` is (probably) a bitfield but I have not looked into this.
- `chara_variation_num` is the variation number of a character. This number increases for characters with a lot of alternative styles.
- `sort_name` and `disp_name` are self explanatory.
- `file_type` has two distinct usages. If `file_type` is <= 5 then it will look for `<filename>_<file_type>.ifs`. If `file_type` is > 5 then it'll look for `<filename>_diff.ifs`.
- `lapis_shape` is either blank or `dia`/`tear`/`heart`/`squ`.
- `lapis_color` is either blank or `blue`/`pink`/`red`/`green`/`normal`/`yellow`/`purple`/`black`.
- `ha` is the hariai image.
- `catchtext` is the catchphrase text that shows over top the image on the character selection screen.
- `win2_trigger` is undocumented.
- `game_version` is the source game version number.
`flavor` is as follows:
- `phrase1`/`phrase2`/`phrase3`/`phrase4`/`phrase5`/`phrase6` are the phrases a character can say.
- `birthday` is a string to be displayed in the birthday field on the character selection screen.
- `chara1_birth_month`/`chara2_birth_month`/`chara3_birth_month` and `chara1_birth_date`/`chara2_birth_date`/`chara3_birth_date` are the numeric values for the respective character's birth date and month.
- `style1` is undocumented.
- `style3` is undocumented.
`style2` is as follows:
- `fontface` is undocumented but changing it changes the font used for the character's text.
- `color` is the RGB values for the character's text. Example: `6204672` = `#5EAD00`.
- `width`/`height` are self explanatory.
## Custom pack folder structure
- the song pack should go in `data_mods`
- you should have a unique folder name for your mod (e.g. `data_mods\crm_custom_2`)
- database files must go in your mod folder root (e.g. `data_mods\crm_custom_2\custom_musicdb_crm2.xml`)
- all sound data .ifs must go in the sd subfolder and within another unique folder name to avoid collision with other packs (e.g. `data_mods\crm_custom_2\sd\crm_custom\despacito.ifs`)
- ha_merge.ifs, kc_diff.ifs and other files should follow the original data structure (e.g. `data_mods\crm_custom_2\tex\system\ha_merge_ifs\ha_despacito.ifs`).
- Thanks to mon's LayeredFS, any file from the `data` folder can be modded this way
- NOTE: do not pack an .ifs file unless it is meant to hide some files from the game. For example it is ok to have several mods with a `ha_merge_ifs` folder, but do not use a `ha_merge.ifs` file or it will entirely replace the original file rather than merging with it.
- IMPORTANT NOTE: use only lowercase filenames both inside your .ifs and for the .ifs themselves. Failing to do so might crash the game.
- Make sure your xml files are shift-jis encoded, else your songs won't appear ingame.
## Good practice for custom chart makers
Use this url to avoid songid collisions with other chart makers
`https://docs.google.com/spreadsheets/d/18qPEH5OZH67Blq6ySlHRnxxfmojFmgG7GQ80Wyd21zY/edit?usp=sharing`
- do not use hiragana or kanji in the `fw_genre`/`fw_title`/`fw_artist` fields, they are used to sort the songlist.
- use only lowercase filenames both inside your .ifs and for the .ifs themselves
- it is not required to use lowercase filenames for the .xml files but it's better to do it
- use a unique sd subfolder name (e.g. sd/custom_milo/) to avoid filename collision with other packs

370
omnimix/popndll.py Normal file
View File

@ -0,0 +1,370 @@
import copy
import pefile
from lxml.etree import parse as etree_parse
from lxml.builder import E
# Per-slot chart availability bit inside a song's `mask` field, indexed by
# chart slot [ep, np, hp, op, bp_n, bp_h, <unused>]. A 0 means the chart is
# always considered present; None marks a slot that is never read.
# NOTE(review): verify_data.py keeps a separate 6-entry copy of this table.
CHART_MASKS = [0x00080000, 0, 0x01000000, 0x02000000, 0, 0x04000000, None]
def is_placeholder_song(c):
    """Return True when every name field of the music entry is empty (unused slot)."""
    name_fields = ('fw_genre', 'fw_title', 'fw_artist', 'genre', 'title', 'artist')
    return all(c[field] == '' for field in name_fields)
def is_placeholder_chara(c):
    """Return True when either of the two low flag bits is set.

    Per the chara struct notes elsewhere in this module these bits mark
    deleted (C_DEL) and CPU-only characters.
    """
    return bool(c['flags'] & 3)
def translate_konami_string(data):
    """Decode a Konami cp932 (Shift-JIS) byte string into text.

    Undecodable bytes are silently dropped and NUL padding is stripped.

    A substitution table for Konami's private glyph mappings (e.g. gaiji
    code points standing in for accented Latin characters) used to live
    here but was fully commented out; the dead loop over that empty table
    has been removed.
    """
    return data.decode('cp932', errors="ignore").strip('\0')
def calculate_struct_len(data_struct):
    """Return the total byte size of one record described by *data_struct*.

    Each field descriptor is [element_size, element_count, ...]; the record
    length is the sum of size * count over every field.
    """
    total = 0
    for descriptor in data_struct.values():
        total += descriptor[0] * descriptor[1]
    return total
def read_struct_data(pe, data_struct, data, index):
    """Deserialize record *index* from the raw table bytes in *data*.

    pe -- pefile.PE instance, used to resolve pointer fields to strings.
    data_struct -- field descriptors ([size, count, is_ptr, *flags]).
    data -- raw bytes of the whole table.
    index -- zero-based record index into the table.

    Returns a dict of field values plus bookkeeping keys '_id' (the record
    index) and '_type' (the struct descriptor itself).
    """
    data_struct_len = calculate_struct_len(data_struct)
    offset = index * data_struct_len
    output = {
        '_id': index,
        '_type': data_struct,
    }
    idx = 0  # running byte offset within the record
    for k in data_struct:
        dsize, dcount, is_ptr = data_struct[k][:3]
        if dcount > 1:
            # Multi-element fields are collected into a list.
            output[k] = []
        for i in range(dcount):
            if 'string' in data_struct[k]:
                # Inline fixed-width cp932 string.
                cur_data = translate_konami_string(data[offset+idx:offset+idx+dsize])
            else:
                cur_data = int.from_bytes(data[offset+idx:offset+idx+dsize], 'little', signed='signed' in data_struct[k])
            if 'ignore' in data_struct[k] and cur_data != 0:
                # 'ignore' fields are expected to be padding; bail out loudly
                # so struct layout mistakes are caught during development.
                # NOTE(review): hexdump is a third-party debug-only import and
                # exit(1) hard-kills the process from library code.
                print(index)
                print("Field set to be ignored, but it has non-zero data")
                import hexdump
                hexdump.hexdump(data[offset:offset+data_struct_len])
                exit(1)
            if is_ptr:
                # Remove image base (0x10000000) from pointer address and get string data
                cur_data = translate_konami_string(pe.get_string_at_rva(rva=cur_data - 0x10000000))
            if dcount == 1:
                output[k] = cur_data
            else:
                output[k].append(cur_data)
            idx += dsize
        if 'ignore' in data_struct[k] or 'ignore_silent' in data_struct[k]:
            # Padding fields are dropped from the output entirely.
            del output[k]
    return output
def get_type(struct, k):
    """Map field *k*'s struct descriptor to its XML `__type` string.

    'charts' fields and pointer/string fields get symbolic names; every
    other field becomes a sized integer type such as "u16" or "s32".
    """
    descriptor = struct[k]
    if 'charts' in descriptor:
        return "charts"
    is_ptr = descriptor[2] == True
    if is_ptr or 'string' in descriptor:
        return "str"
    prefix = "s" if 'signed' in descriptor else "u"
    return prefix + str(descriptor[0] * 8)
def serialize_data_charts(x):
    """Serialize the 7-slot charts array into <chart> elements.

    Empty slots (the integer 0) are skipped. The '_idx' key, when present,
    supplies the element's idx attribute and is removed from the chart dict
    as a side effect; otherwise the numeric slot position is used.
    """
    elements = []
    for slot, chart in enumerate(x):
        if chart == 0:
            continue
        label = chart.pop('_idx', str(slot))
        elements.append(E('chart', *serialize_data(chart), idx=label))
    return elements
def serialize_data(x):
    """Serialize a struct dict into a list of lxml elements.

    Keys starting with '_' (bookkeeping) and None values are skipped.
    Chart arrays recurse through serialize_data_charts, nested struct dicts
    recurse through serialize_data, lists are emitted space-joined with a
    __count attribute, and scalars carry just their __type attribute.
    """
    elements = []
    for key, value in x.items():
        if key.startswith("_") or value is None:
            continue
        field_type = get_type(x['_type'], key)
        if field_type == "charts":
            elements.append(E(key, *serialize_data_charts(value)))
        elif type(value) in [list, dict] and '_type' in value:
            elements.append(E(key, *serialize_data(value)))
        elif type(value) in [list]:
            elements.append(E(
                key,
                " ".join(str(v) for v in value),
                __type=field_type,
                __count=str(len(value)),
            ))
        else:
            elements.append(E(key, str(value), __type=field_type))
    return elements
def parse_database_from_dll(input_dll_filename, input_patch_xml_filename):
    """Extract the embedded song/chara/chart/flavor/style tables from popn22.dll.

    input_dll_filename -- path to the game DLL.
    input_patch_xml_filename -- patch XML (e.g. produced by ida_find_addrs.py)
        providing each table's base address and entry-count limit for this
        specific DLL build.

    Returns {'musicdb': [...], 'charadb': [...]} with cross-table references
    (chart file entries, flavors, font styles, lapis enums) resolved inline.
    """
    # Struct field descriptors for each embedded table.
    # Format: [size, num, ptr_flag]
    data_struct_song = {
        'fw_genre': [4, 1, True],
        'fw_title': [4, 1, True],
        'fw_artist': [4, 1, True],
        'genre': [4, 1, True],
        'title': [4, 1, True],
        'artist': [4, 1, True],
        'chara1': [2, 1, False],
        'chara2': [2, 1, False],
        'mask': [4, 1, False],
        'folder': [4, 1, False],
        'cs_version': [4, 1, False],
        'categories': [4, 1, False],
        'diffs': [1, 6, False],
        'charts': [2, 7, False, 'charts'],
        'ha': [4, 1, True],
        'chara_x': [4, 1, False], # Hariai positioning it seems
        'chara_y': [4, 1, False], # Hariai positioning it seems
        'unk1': [2, 32, False],
        'display_bpm': [2, 12, False],
        'hold_flags': [1, 8, False],
    }
    data_struct_file = {
        'folder': [4, 1, True],
        'filename': [4, 1, True],
        'audio_param1': [4, 1, False, 'signed'], # Something relating to volume/pan/etc?
        'audio_param2': [4, 1, False, 'signed'], # Something relating to volume/pan/etc?
        'audio_param3': [4, 1, False, 'signed'], # Something relating to volume/pan/etc?
        'audio_param4': [4, 1, False, 'signed'], # Something relating to volume/pan/etc?
        'file_type': [4, 1, False], # <= 0 is shiri.ifs, <= 5 is shiri_%d.ifs, anything else is shiri_diff.ifs
        'used_keys': [2, 1, False], # Bit field that says what notes were used in the chart
        'pad': [2, 1, False, 'ignore'],
    }
    data_struct_chara = {
        'chara_id': [4, 1, True],
        'flags': [4, 1, False], # Controls visibility, etc. bit 1 = C_DEL, bit 2 = CPU-only, bit 5 = disabled/off?
        'folder': [4, 1, True],
        'gg': [4, 1, True],
        'cs': [4, 1, True],
        'icon1': [4, 1, True],
        'icon2': [4, 1, True],
        'chara_xw': [2, 1, False], # Some kind of width or x position. If mask in data_struct_song has bit 23 (0x800000) set then this is ignored
        'chara_yh': [2, 1, False], # Some kind of height or y position. If mask in data_struct_song has bit 23 (0x800000) set then this is ignored
        'display_flags': [4, 1, False], # Some kind of bitfield flags.
        # If bit 1 is set then linear = 1
        # If bit 0 is not set then copy (flags2 & 2) into the linear flag field (doesn't have any effect?)
        # If bit 6 (0x20) is set then clipping = 1
        # If bit 6 (0x20) is not set then copy (flags & 0x10) >> 3 into the clipping flag field
        # Bit 8 (0x100) is unused?? Is set for gg_mimi_15a
        'flavor': [2, 1, False, 'signed'],
        'chara_variation_num': [1, 1, False],
        'pad': [1, 1, False, 'ignore'],
        'sort_name': [4, 1, True],
        'disp_name': [4, 1, True],
        'file_type': [4, 1, False], # <= 0 is shiri.ifs, <= 5 is shiri_%d.ifs, anything else is shiri_diff.ifs
        'lapis_shape': [4, 1, False], # non/dia/tear/heart/squ
        'lapis_color': [1, 1, False], # non/blue/pink/red/green/normal/yellow/purple/black
        'pad2': [1, 3, False, 'ignore'],
        'ha': [4, 1, True],
        'catchtext': [4, 1, True],
        'win2_trigger': [2, 1, False, 'signed'], # If played against a specific character ID, it triggers a win 2 animation
        'pad3': [1, 2, False, 'ignore'],
        'game_version': [4, 1, False], # What version this particular style was introduced
    }
    data_struct_flavors = {
        'phrase1': [13, 1, False, 'string'],
        'phrase2': [13, 1, False, 'string'],
        'phrase3': [13, 1, False, 'string'],
        'phrase4': [13, 1, False, 'string'],
        'phrase5': [13, 1, False, 'string'],
        'phrase6': [13, 1, False, 'string'],
        'pad': [2, 1, False, 'ignore'],
        'birthday': [4, 1, True],
        'chara1_birth_month': [1, 1, False],
        'chara2_birth_month': [1, 1, False],
        'chara3_birth_month': [1, 1, False],
        'chara1_birth_date': [1, 1, False],
        'chara2_birth_date': [1, 1, False],
        'chara3_birth_date': [1, 1, False],
        'style1': [2, 1, False], # Font and other related stylings
        'style2': [2, 1, False], # Font and other related stylings
        'style3': [2, 1, False], # Font and other related stylings
    }
    data_struct_fontstyle = {
        'fontface': [4, 1, False],
        'color': [4, 1, False],
        'height': [4, 1, False],
        'width': [4, 1, False],
    }
    # Read XML file: each table's entry limit may be decimal or 0x-prefixed hex.
    patch_xml = etree_parse(input_patch_xml_filename)
    music_db_limit = patch_xml.find('limits').find('music').text
    music_db_limit = int(music_db_limit, 16 if music_db_limit.startswith("0x") else 10)
    chart_table_limit = patch_xml.find('limits').find('chart').text
    chart_table_limit = int(chart_table_limit, 16 if chart_table_limit.startswith("0x") else 10)
    style_table_limit = patch_xml.find('limits').find('style').text
    style_table_limit = int(style_table_limit, 16 if style_table_limit.startswith("0x") else 10)
    flavor_table_limit = patch_xml.find('limits').find('flavor').text
    flavor_table_limit = int(flavor_table_limit, 16 if flavor_table_limit.startswith("0x") else 10)
    chara_table_limit = patch_xml.find('limits').find('chara').text
    chara_table_limit = int(chara_table_limit, 16 if chara_table_limit.startswith("0x") else 10)
    # Table base addresses are virtual addresses (image base 0x10000000 included).
    music_db_addr = int(patch_xml.find('buffer_base_addrs').find('music').text, 16)
    chart_table_addr = int(patch_xml.find('buffer_base_addrs').find('chart').text, 16)
    style_table_addr = int(patch_xml.find('buffer_base_addrs').find('style').text, 16)
    flavor_table_addr = int(patch_xml.find('buffer_base_addrs').find('flavor').text, 16)
    chara_table_addr = int(patch_xml.find('buffer_base_addrs').find('chara').text, 16)
    # Modified an old one off script for this so I don't feel like refactoring it too much to get rid of these
    music_db_end_addr = (music_db_limit) * calculate_struct_len(data_struct_song) + music_db_addr
    chart_table_end_addr = (chart_table_limit) * calculate_struct_len(data_struct_file) + chart_table_addr
    style_table_end_addr = (style_table_limit) * calculate_struct_len(data_struct_fontstyle) + style_table_addr
    flavor_table_end_addr = (flavor_table_limit) * calculate_struct_len(data_struct_flavors) + flavor_table_addr
    chara_table_end_addr = (chara_table_limit) * calculate_struct_len(data_struct_chara) + chara_table_addr
    pe = pefile.PE(input_dll_filename, fast_load=True)
    # Read font style table
    data = pe.get_data(style_table_addr - 0x10000000, style_table_end_addr - style_table_addr)
    fontstyle_table = [read_struct_data(pe, data_struct_fontstyle, data, i) for i in range(len(data) // calculate_struct_len(data_struct_fontstyle))]
    # Read flavor table
    data = pe.get_data(flavor_table_addr - 0x10000000, flavor_table_end_addr - flavor_table_addr)
    flavor_table = [read_struct_data(pe, data_struct_flavors, data, i) for i in range(len(data) // calculate_struct_len(data_struct_flavors))]
    for c in flavor_table:
        # Resolve style2 indices into the font style table; 0 means "no style".
        # NOTE(review): the -11 bias appears to be the table's base index in
        # the DLL — confirm against a known build before relying on it.
        if c['style2'] == 0:
            c['style2'] = None
        elif c['style2'] - 11 >= 0:
            c['style2'] = fontstyle_table[c['style2'] - 11]
    # Read chara table
    data = pe.get_data(chara_table_addr - 0x10000000, chara_table_end_addr - chara_table_addr)
    charadb = [read_struct_data(pe, data_struct_chara, data, i) for i in range(len(data) // calculate_struct_len(data_struct_chara))]
    # Lapis fields are serialized as symbolic strings from here on.
    data_struct_chara['lapis_shape'].append('string')
    data_struct_chara['lapis_color'].append('string')
    flavors = []
    for c in charadb:
        c['lapis_shape'] = ["", "dia", "tear", "heart", "squ"][c['lapis_shape']]
        c['lapis_color'] = ["", "blue", "pink", "red", "green", "normal", "yellow", "purple", "black"][c['lapis_color']]
        flavors.append(c['flavor'])
        # Inline the flavor record; negative flavor index means "none".
        c['flavor'] = flavor_table[c['flavor']] if c['flavor'] >= 0 else None
    # Read chart/file table
    data = pe.get_data(chart_table_addr - 0x10000000, chart_table_end_addr - chart_table_addr)
    file_lookup = [read_struct_data(pe, data_struct_file, data, i) for i in range(len(data) // calculate_struct_len(data_struct_file))]
    # Read music database
    data = pe.get_data(music_db_addr - 0x10000000, music_db_end_addr - music_db_addr)
    musicdb = [read_struct_data(pe, data_struct_song, data, i) for i in range(len(data) // calculate_struct_len(data_struct_song))]
    # Add connections to other tables
    data_struct_song['chara1'].append('string')
    data_struct_song['chara2'].append('string')
    for c in musicdb:
        # Each entry gets its own '_type' copy since per-entry keys are deleted below.
        c['_type'] = copy.deepcopy(c['_type'])
        if not is_placeholder_song(c):
            # Replace chart table indices with inlined file records, folding in
            # the per-chart difficulty and hold flag from the song entry.
            charts = []
            for chart_idx, idx in enumerate(c['charts']):
                if CHART_MASKS[chart_idx] is not None and (CHART_MASKS[chart_idx] == 0 or c['mask'] & CHART_MASKS[chart_idx] != 0):
                    charts.append(copy.deepcopy(file_lookup[idx]))
                    charts[-1]['_type'] = copy.deepcopy(charts[-1]['_type'])
                    charts[-1]['_type']['diff'] = [1, 1, False]
                    charts[-1]['_type']['hold_flag'] = [1, 1, False]
                    charts[-1]['diff'] = c['diffs'][chart_idx]
                    charts[-1]['_id'] = chart_idx
                    charts[-1]['_idx'] = ['ep', 'np', 'hp', 'op', 'bp_n', 'bp_h'][chart_idx]
                    charts[-1]['hold_flag'] = c['hold_flags'][chart_idx]
                else:
                    charts.append(0)
            c['charts'] = charts
            # Remove chart mask flags because they'll be added later in popnmusichax based on the charts available
            mask_full = sum([x for x in CHART_MASKS if x is not None])
            c['mask'] = c['mask'] & ~mask_full
        # diffs/hold_flags were folded into the chart entries above.
        for k in ['diffs', 'hold_flags']:
            if k in c['_type']:
                del c['_type'][k]
            if k in c:
                del c[k]
        # Replace chara table indices with the referenced chara_id strings.
        c['chara1'] = charadb[c['chara1']]['chara_id'] if c['chara1'] != 0 else 0
        c['chara2'] = charadb[c['chara2']]['chara_id'] if c['chara2'] != 0 else 0
    database = {
        'musicdb': musicdb,
        'charadb': charadb
    }
    return database

503
omnimix/verify_data.py Normal file
View File

@ -0,0 +1,503 @@
import argparse
import os
import sys
import ifstools
import popndll
from enum import Enum
from lxml.etree import tostring, fromstring
from lxml.builder import E
class DataErrors(Enum):
    """Error codes reported by the data verification passes in this script."""
    SD_PATH_NOT_EXIST = 1          # sd/<folder> directory is missing
    SD_IFS_NOT_EXIST = 2           # sound-data .ifs archive is missing
    SD_CHARTS_NOT_FOUND = 3        # chart(s) expected by the db are absent from the .ifs
    SD_CHARTS_UNUSED = 4           # chart file present but not referenced by the db
    KC_NOT_FOUND = 5               # kc_XXXX.ifs entry missing from the kc archive
    BG_NOT_FOUND = 6               # bg_XXXX.ifs entry missing from the bg archive
    CHARA_IFS_NOT_FOUND = 7        # character .ifs archive is missing
    CHARA_IFS_INNER_NOT_FOUND = 8  # icon/gg image missing inside the chara .ifs
    IFS_READ_ERROR = 9             # ifstools failed while reading an archive
    SD_CHART_ERROR = 10            # chart data failed verify_chart() validation
# Per-slot chart filename suffixes and availability mask bits, indexed
# [ep, np, hp, op, bp_n, bp_h]; both battle slots share the "bp" suffix.
# A mask of 0 contributes nothing when OR-ing a song's mask together.
# NOTE(review): popndll.py carries a 7-entry variant of CHART_MASKS.
CHART_LABELS = ["ep", "np", "hp", "op", "bp", "bp"]
CHART_MASKS = [0x00080000, 0, 0x01000000, 0x02000000, 0, 0x04000000]
def elem2dict(node):
    """
    Convert an lxml.etree node tree into a dict.
    Source: https://gist.github.com/jacobian/795571#gistcomment-2810160

    An 'id' attribute (or an 'idx' attribute, possibly given as a chart
    label such as "np") becomes the entry's '_id'. Children tagged
    'charts' become a 7-slot list keyed by each chart's '_id'; leaf values
    are split/int-converted according to their __count/__type attributes.
    """
    int_types = ['u8', 's8', 'u16', 's16', 'u32', 's32']
    result = {}

    raw_id = node.get('id', None)
    if raw_id:
        result['_id'] = int(raw_id)

    raw_idx = node.get('idx', None)
    if raw_idx:
        labels = ["ep", "np", "hp", "op", "bp_n", "bp_h"]
        if raw_idx in labels:
            raw_idx = labels.index(raw_idx)
        result['_id'] = int(raw_idx)

    for child in node.iterchildren():
        # Remove namespace prefix
        key = child.tag.split('}')[1] if '}' in child.tag else child.tag
        if key == 'charts':
            slots = [0] * 7
            for chart_node in child.iterchildren():
                chart = elem2dict(chart_node)
                slots[chart['_id']] = chart
            result[key] = slots
            continue
        text = child.text
        if not (text and text.strip()):
            # No textual payload: recurse into the subtree.
            result[key] = elem2dict(child)
            continue
        value = text
        if child.get('__count', None):
            value = value.split(' ')
        if child.get('__type', None) in int_types:
            if type(value) is list:
                value = [int(item) for item in value]
            else:
                value = int(value)
        result[key] = value
    return result
def convert_db_to_dict(db):
    """Re-key a sequential database list as an {index: entry} dict."""
    return dict(enumerate(db))
def load_patch_dbs(input_db_folder, databases):
    """Overlay omnimix patch XMLs on top of the DLL-extracted databases.

    input_db_folder -- folder containing master.xml (a list of <filename>
        entries) plus the patch XMLs it references.
    databases -- {'musicdb': {...}, 'charadb': {...}} dicts keyed by id.

    Returns the same databases dict, mutated in place. If master.xml does
    not exist the input is returned unchanged.
    """
    def get_sequential_files(db, master_xml_path, target_elm):
        # Patch files declare shift-jis but are parsed as cp932 (a superset).
        master_xml = fromstring(open(master_xml_path, "rb").read().replace(b"shift-jis", b"cp932"))
        for filename in master_xml.findall('filename'):
            patch_xml = fromstring(open(os.path.join(input_db_folder, filename.text), "rb").read().replace(b"shift-jis", b"cp932"))
            for elm in patch_xml.findall(target_elm):
                idx = int(elm.get('id'))
                new_entry = elem2dict(elm)
                if idx not in db:
                    # Brand new entry: take it wholesale.
                    db[idx] = new_entry
                else:
                    # Existing entry: merge chart slots individually so a
                    # patch can add/override single charts, then merge the
                    # remaining top-level fields.
                    if 'charts' in db[idx]:
                        for chart in new_entry.get('charts', []):
                            if chart == 0:
                                continue
                            if db[idx]['charts'][chart['_id']] == 0:
                                db[idx]['charts'][chart['_id']] = chart
                            else:
                                db[idx]['charts'][chart['_id']].update(chart)
                        if 'charts' in new_entry:
                            # Already merged above; don't clobber via update().
                            del new_entry['charts']
                    db[idx].update(new_entry)
        return db
    master_xml_path = os.path.join(input_db_folder, "master.xml")
    if not os.path.exists(master_xml_path):
        return databases
    databases['charadb'] = get_sequential_files(databases['charadb'], master_xml_path, "chara")
    databases['musicdb'] = get_sequential_files(databases['musicdb'], master_xml_path, "music")
    return databases
def verify_chart(data):
    """Sanity-check a single pop'n chart binary blob.

    First heuristically determines the event size — 8 bytes (older format)
    or 12 bytes (newer format carrying hold-note lengths) — then asserts a
    battery of structural checks: chronological ordering, a full judge
    timing table at t=0, BPM and metronome events at t=0, valid note and
    keysound lanes, measure/beat markers, exactly one BGM start, an ending
    event, and (12-byte charts only) no overlapping holds and no notes at
    t=0.

    Raises AssertionError on the first failed check, or Exception when the
    event size cannot be determined. Returns True when everything passes.
    """
    assert len(data) > 0
    event_size = 12
    if len(data) / 12 != len(data) // 12:
        # The chart data should be divisible both as a float and int and get
        # the same result: not a multiple of 12, so events must be 8 bytes.
        event_size = 8
    elif len(data) / 8 != len(data) // 8:
        # Multiple of 12 but not of 8, so events must be 12 bytes.
        event_size = 12
    else:
        # Divisible by both; disambiguate using the marker byte (offset +4),
        # which should only ever be 0x00 or 0x45.
        marker_8 = sorted(list(set([data[i+4] for i in range(0, len(data), 8)])))
        marker_12 = sorted(list(set([data[i+4] for i in range(0, len(data), 12)])))
        marker_8_diff = list(set(marker_8) - set([0x00, 0x45]))
        marker_12_diff = list(set(marker_12) - set([0x00, 0x45]))
        if len(marker_8_diff) > 0 and len(marker_12_diff) == 0:
            event_size = 12
        elif len(marker_8_diff) == 0 and len(marker_12_diff) > 0:
            event_size = 8
        elif len(marker_8_diff) == 0 and len(marker_12_diff) == 0:
            # Inconclusive; fall back to the command nibble byte, which only
            # uses values 1-8, 10 and 11.
            cmd_8 = sorted(list(set([data[i+5] for i in range(0, len(data), 8)])))
            cmd_12 = sorted(list(set([data[i+5] for i in range(0, len(data), 12)])))
            cmd_8_diff = list(set(cmd_8) - set([1, 2, 3, 4, 5, 6, 7, 8, 10, 11]))
            cmd_12_diff = list(set(cmd_12) - set([1, 2, 3, 4, 5, 6, 7, 8, 10, 11]))
            if len(cmd_8_diff) > 0 and len(cmd_12_diff) == 0:
                event_size = 12
            elif len(cmd_8_diff) == 0 and len(cmd_12_diff) > 0:
                event_size = 8
            else:
                raise Exception("Couldn't determine size of chart events")
    # Parse events: (raw, timestamp, marker, cmd, param1, param2, param3).
    events_by_cmd = {}
    events = []
    for i in range(0, len(data), event_size):
        chunk = data[i:i+event_size]
        if len(chunk) != event_size:
            break
        timestamp = int.from_bytes(chunk[:4], 'little')
        marker = chunk[4]
        cmd = chunk[5] & 0x0f
        param1 = chunk[5] >> 4
        param2 = chunk[6:8]
        param3 = chunk[8:] if event_size == 12 else 0
        event = (chunk, timestamp, marker, cmd, param1, param2, param3)
        events.append(event)
        # is_valid_marker = (cmd in [0x0a, 0x0b] and marker == 0) or (cmd not in [0x0a, 0x0b] and marker == 0x45)
        # assert(is_valid_marker == True)
        if cmd not in events_by_cmd:
            events_by_cmd[cmd] = []
        events_by_cmd[cmd].append(event)
    # Events must be in chronological order.
    chart_is_sequential = events == sorted(events, key=lambda x: x[1])
    assert chart_is_sequential
    # Judge timing table (cmd 0x08): six windows, present at t=0.
    chart_has_timings = 0x08 in events_by_cmd and len(events_by_cmd.get(0x08, [])) >= 6
    assert chart_has_timings
    chart_has_timings_at_zero = 0x08 in events_by_cmd and min([x[1] for x in events_by_cmd.get(0x08, [])]) == 0
    assert chart_has_timings_at_zero
    chart_timings = {x[5][1] >> 4: (x[5][1] & 0x0f) | x[5][0] for x in events_by_cmd.get(0x08, [])}
    chart_has_sequential_timings = sorted([chart_timings[k] for k in range(6)]) == [chart_timings[k] for k in range(6)]
    assert chart_has_sequential_timings
    standard_timings = [
        0x76, # Early bad
        0x7a, # Early good
        0x7e, # Early great
        0x84, # Late great
        0x88, # Late good
        0x8c, # Late bad
    ]
    # Each window must be near its standard value (within 15 units).
    chart_has_sensible_timings = [abs(chart_timings[k] - standard_timings[k]) < 15 for k in range(6)]
    chart_has_sensible_timings = list(set(chart_has_sensible_timings)) == [True]
    assert chart_has_sensible_timings
    # BPM events (cmd 0x04): at least one, starting at t=0, non-negative.
    chart_has_bpm = 0x04 in events_by_cmd and len(events_by_cmd.get(0x04, [])) > 0
    assert chart_has_bpm
    chart_has_bpm_at_zero = 0x04 in events_by_cmd and min([x[1] for x in events_by_cmd.get(0x04, [])]) == 0
    assert chart_has_bpm_at_zero
    chart_has_valid_bpms = 0x04 in events_by_cmd and min([int.from_bytes(x[5], 'little') for x in events_by_cmd.get(0x04, [])]) >= 0
    assert chart_has_valid_bpms
    # Metronome events (cmd 0x05): at least one, starting at t=0.
    chart_has_metronome = 0x05 in events_by_cmd and len(events_by_cmd.get(0x05, [])) > 0
    assert chart_has_metronome
    chart_has_metronome_at_zero = 0x05 in events_by_cmd and min([x[1] for x in events_by_cmd.get(0x05, [])]) == 0
    assert chart_has_metronome_at_zero
    # Note events (cmd 0x01): lanes 0-8, and at least one note overall.
    used_notes = sorted(list(set([x[5][0] for x in events_by_cmd.get(0x01, [])])))
    is_valid_range_notes = not used_notes or (min(used_notes) >= 0 and max(used_notes) <= 8)
    assert is_valid_range_notes
    chart_has_notes = len(used_notes) > 0
    assert chart_has_notes
    # Keysound assignments (cmd 0x02): lanes 0-8, at least one assignment.
    used_notes = sorted(list(set([x[5][1] >> 4 for x in events_by_cmd.get(0x02, [])])))
    is_valid_range_keysound_range = not used_notes or (min(used_notes) >= 0 and max(used_notes) <= 8)
    # BUGFIX: this previously re-asserted is_valid_range_notes (copy/paste),
    # leaving the keysound lane-range check dead.
    assert is_valid_range_keysound_range
    chart_has_keysounds = len(used_notes) > 0
    assert chart_has_keysounds
    # Auto keysounds (cmd 0x07) may use slots 0-15 and are optional.
    used_notes = sorted(list(set([x[5][1] >> 4 for x in events_by_cmd.get(0x07, [])])))
    is_valid_range_auto_keysound_range = not used_notes or (min(used_notes) >= 0 and max(used_notes) <= 15)
    assert is_valid_range_auto_keysound_range
    # Structural markers: measures (0x0a), beats (0x0b), exactly one BGM
    # start (0x03) and at least one ending (0x06).
    chart_has_measures = len(events_by_cmd.get(0x0a, [])) > 0
    assert chart_has_measures
    chart_has_beats = len(events_by_cmd.get(0x0b, [])) > 0
    assert chart_has_beats
    chart_has_bgm_start = len(events_by_cmd.get(0x03, [])) > 0
    assert chart_has_bgm_start
    chart_has_single_bgm_start = len(events_by_cmd.get(0x03, [])) == 1
    assert chart_has_single_bgm_start
    chart_has_ending = len(events_by_cmd.get(0x06, [])) > 0
    assert chart_has_ending
    # chart_has_single_ending = len(events_by_cmd.get(0x06, [])) == 1
    # assert(chart_has_single_ending == True)
    if event_size == 12:
        # New format only: a note's param3 is its hold duration. No other
        # note on the same lane may start inside an active hold, and no
        # note may sit at t=0.
        hold_events = [(x[5][0], x[1], x[1] + int.from_bytes(x[6], 'little'), x[0]) for x in events_by_cmd.get(0x01, []) if int.from_bytes(x[6], 'little') > 0]
        for hold_event in hold_events:
            for x in events_by_cmd.get(0x01, []):
                if x[5][0] == hold_event[0] and x[1] != hold_event[1]:
                    is_impossible_hold = x[1] >= hold_event[1] and x[1] < hold_event[2]
                    assert is_impossible_hold == False
        chart_has_no_notes_at_zero = len([x for x in events_by_cmd.get(0x01, []) if x[1] == 0]) == 0
        assert chart_has_no_notes_at_zero
    return True
def verify_musicdb(musicdb, input_data_folder, is_mod_ifs):
    """Check that every song's on-disk data matches the database.

    musicdb -- {music_id: entry} dict (DLL data, optionally patched).
    input_data_folder -- the game's data folder (contains sd/ and tex/).
    is_mod_ifs -- when True, look in *_mod.ifs instead of *_diff.ifs for
        the system bg/kc archives.

    Returns a list of (DataErrors, music_id, [details]) tuples.
    """
    errors = []
    sd_path = os.path.join(input_data_folder, "sd")
    # System archives are only opened once; we just need their file lists.
    bg_ifs_path = os.path.join(input_data_folder, "tex", "system", "bg_mod.ifs" if is_mod_ifs else "bg_diff.ifs")
    bg_ifs = ifstools.IFS(bg_ifs_path)
    bg_ifs_files = [str(x) for x in bg_ifs.tree.all_files]
    bg_ifs.close()
    kc_ifs_path = os.path.join(input_data_folder, "tex", "system", "kc_mod.ifs" if is_mod_ifs else "kc_diff.ifs")
    kc_ifs = ifstools.IFS(kc_ifs_path)
    kc_ifs_files = [str(x) for x in kc_ifs.tree.all_files]
    kc_ifs.close()
    for music_idx in musicdb:
        entry = musicdb[music_idx]
        if popndll.is_placeholder_song(entry):
            # Skip placeholder entries
            continue
        # Generate mask and expected charts list
        if 'mask' not in entry:
            entry['mask'] = 0
        expected_charts = []
        for chart in entry.get('charts', []):
            if chart == 0:
                continue
            entry['mask'] |= CHART_MASKS[chart['_id']]
            if chart.get('diff', 0) == 0:
                # If a song has a 0 difficulty level then the game won't make it selectable so it doesn't matter if it exists or not
                continue
            if CHART_LABELS[chart['_id']] not in expected_charts:
                expected_charts.append(CHART_LABELS[chart['_id']])
        found_charts = []
        found_chart_errors = []  # NOTE(review): never populated; dead variable
        for chart_idx, chart in enumerate(entry['charts']):
            if type(chart) is int:
                # Doesn't exist
                continue
            sd_game_path = os.path.join(sd_path, chart['folder'])
            if not os.path.exists(sd_game_path):
                print("Could not find", sd_game_path)
                errors.append((DataErrors.SD_PATH_NOT_EXIST, music_idx, [sd_game_path]))
            # file_type selects the archive suffix: 1-5 -> _0N, >5 -> _diff.
            sd_ifs_base_path = os.path.join(sd_path, chart['folder'], chart['filename'])
            if chart['file_type'] > 0 and chart['file_type'] <= 5:
                sd_ifs_base_path = "%s_%02d" % (sd_ifs_base_path, chart['file_type'])
            elif chart['file_type'] > 0 and chart['file_type'] > 5:
                sd_ifs_base_path = "%s_diff" % (sd_ifs_base_path)
            sd_ifs_path = "%s.ifs" % (sd_ifs_base_path)
            if not os.path.exists(sd_ifs_path):
                print("Could not find", sd_ifs_path)
                errors.append((DataErrors.SD_IFS_NOT_EXIST, music_idx, [sd_ifs_path]))
                continue
            preview_filename = "%s_pre.2dx" % (chart['filename'])
            keysounds_filename = "%s.2dx" % (chart['filename'])
            target_chart_filename = "%s_%s.bin" % (chart['filename'], CHART_LABELS[chart_idx])
            ifs = ifstools.IFS(sd_ifs_path)
            found_preview = False
            found_keysounds = False
            found_target_chart = False
            for inner_filename in ifs.tree.all_files:
                found_preview = inner_filename == preview_filename or found_preview
                found_keysounds = inner_filename == keysounds_filename or found_keysounds
                found_target_chart = inner_filename == target_chart_filename or found_target_chart
                for chart_label in CHART_LABELS:
                    if str(inner_filename).endswith("_%s.bin" % chart_label):
                        found_charts.append(chart_label)
                        # Validate the chart payload itself; any exception is
                        # recorded with the source line that failed.
                        try:
                            verify_chart(inner_filename.load())
                        except BaseException as e:
                            import traceback
                            exc_type, exc_value, exc_traceback = sys.exc_info()
                            traceback_info = traceback.extract_tb(exc_traceback)
                            filename, line, func, text = traceback_info[-1]
                            errors.append((DataErrors.SD_CHART_ERROR, music_idx, [str(inner_filename), text]))
                            print(errors[-1])
            ifs.close()
        found_charts = list(set(found_charts))
        # TODO: Add check to make sure battle hyper chart exists?
        unused_charts = list(set(found_charts) - set(expected_charts))
        found_charts = list(set(found_charts) - set(unused_charts))
        found_charts = sorted(found_charts)
        expected_charts = sorted(expected_charts)
        if len(unused_charts) > 0:
            # Unused-chart reporting is intentionally disabled.
            # print("Found unused charts:", found_charts, expected_charts, unused_charts)
            # errors.append((DataErrors.SD_CHARTS_UNUSED, music_idx, [found_charts, expected_charts, unused_charts]))
            pass
        if found_charts != expected_charts:
            errors.append((DataErrors.SD_CHARTS_NOT_FOUND, music_idx, [found_charts, expected_charts, list(set(expected_charts) - set(found_charts))]))
        kc_path = "kc_%04d.ifs" % (music_idx)
        if kc_path not in kc_ifs_files:
            errors.append((DataErrors.KC_NOT_FOUND, music_idx, [kc_path]))
        if entry.get('folder', 0) <= 21:
            # Later games don't use bg_*.ifs
            bg_path = "bg_%04d.ifs" % (music_idx)
            if bg_path not in bg_ifs_files:
                errors.append((DataErrors.BG_NOT_FOUND, music_idx, [bg_path]))
    return errors
def verify_charadb(charadb, input_data_folder, is_mod_ifs):
    """Check that every character's .ifs archive and inner images exist.

    charadb -- {chara_id: entry} dict (DLL data, optionally patched).
    input_data_folder -- the game's data folder (contains tex/).
    is_mod_ifs -- unused here; kept for signature parity with verify_musicdb.

    Returns a list of (DataErrors, chara_id, [details]) tuples.
    NOTE(review): unlike verify_musicdb, missing files here call exit(1)
    and terminate the whole process instead of just collecting the error.
    """
    errors = []
    tex_path = os.path.join(input_data_folder, "tex")
    for chara_idx in charadb:
        entry = charadb[chara_idx]
        if popndll.is_placeholder_chara(entry):
            # Skip placeholder entries
            continue
        # file_type selects the archive suffix: 1-5 -> _0N, >5 -> _diff.
        chara_ifs_base_path = os.path.join(tex_path, entry['folder'], entry['chara_id'])
        if entry['file_type'] > 0 and entry['file_type'] <= 5:
            chara_ifs_base_path = "%s_%02d" % (chara_ifs_base_path, entry['file_type'])
        elif entry['file_type'] > 0 and entry['file_type'] > 5:
            chara_ifs_base_path = "%s_diff" % (chara_ifs_base_path)
        chara_ifs_path = "%s.ifs" % (chara_ifs_base_path)
        if not os.path.exists(chara_ifs_path):
            print("chara ifs not found:", chara_ifs_path)
            errors.append((DataErrors.CHARA_IFS_NOT_FOUND, chara_idx, [chara_ifs_path]))
            exit(1)
        try:
            chara_ifs = ifstools.IFS(chara_ifs_path)
            chara_ifs_files = [str(x) for x in chara_ifs.tree.all_files]
            icon1_path = os.path.join("tex", entry['icon1']) + ".png"
            icon2_path = os.path.join("tex", entry['icon2']) + ".png"
            gg_path = os.path.join("tex", entry['gg']) + ".png"
            for inner_path in [icon1_path, icon2_path, gg_path]:
                if inner_path not in chara_ifs_files:
                    print("chara inner file not found:", inner_path)
                    errors.append((DataErrors.CHARA_IFS_INNER_NOT_FOUND, chara_idx, [inner_path]))
                    exit(1)
        except:
            # NOTE(review): bare except also catches the SystemExit raised by
            # exit(1) above, so a missing inner file gets reported as
            # IFS_READ_ERROR instead of terminating — likely unintended.
            print("ifs read error:", chara_ifs_path)
            errors.append((DataErrors.IFS_READ_ERROR, chara_idx, [chara_ifs_path]))
        # NOTE(review): if ifstools.IFS() itself raised, chara_ifs is unbound
        # here (or stale from the previous iteration) — confirm and guard.
        chara_ifs.close()
    return errors
def verify_data(databases, input_data_folder, is_mod_ifs):
    """Run the music and chara verification passes and print every error found."""
    all_errors = []
    all_errors.extend(verify_musicdb(databases['musicdb'], input_data_folder, is_mod_ifs))
    all_errors.extend(verify_charadb(databases['charadb'], input_data_folder, is_mod_ifs))
    for err in all_errors:
        print(err)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-dll', help='Input DLL file', default=None, required=True)
    parser.add_argument('--input-xml', help='Input XML file', default=None, required=True)
    parser.add_argument('--input-data', help='Input data folder', default=None, required=True)
    parser.add_argument('--input-db', help='Input db folder', default=None)
    args = parser.parse_args()
    # Dump the embedded databases from the game DLL using the address map XML,
    # re-key them by id, then overlay any omnimix patch XMLs before verifying.
    databases = popndll.parse_database_from_dll(args.input_dll, args.input_xml)
    databases = {k: convert_db_to_dict(databases[k]) for k in databases}
    if args.input_db:
        databases = load_patch_dbs(args.input_db, databases)
    # When a patch db folder is given, the data uses the *_mod.ifs archives.
    verify_data(databases, args.input_data, args.input_db is not None)

49
pms2bemani/README.md Normal file
View File

@ -0,0 +1,49 @@
## Requirements
1) To install the requirements needed (ifstools, pydub): `python3 -m pip install -r requirements.txt`
2) Install sox (https://sourceforge.net/projects/sox/files/sox/14.4.2/) and include it in your system PATH or put sox.exe in the folder with `pms2bemani.py`
## Usage
```
usage: pms2bemani.py [-h] [--input-bp INPUT_BP] [--input-ep INPUT_EP]
[--input-np INPUT_NP] [--input-hp INPUT_HP]
[--input-op INPUT_OP] --name NAME --keysounds-folder
KEYSOUNDS_FOLDER [--preview PREVIEW] [--new] [--ifs]
[--preview-offset PREVIEW_OFFSET]
[--preview-duration PREVIEW_DURATION]
optional arguments:
-h, --help show this help message and exit
--input-bp INPUT_BP Input file (BP)
--input-ep INPUT_EP Input file (EP)
--input-np INPUT_NP Input file (NP)
--input-hp INPUT_HP Input file (HP)
--input-op INPUT_OP Input file (OP)
--name NAME Base name used for output
--keysounds-folder KEYSOUNDS_FOLDER
Input folder containing keysounds
--preview PREVIEW Input preview file (optional, overrides preview
generation code)
--new New chart format which supports hold notes
--ifs Create IFS output instead of folder output (requires
ifstools)
--preview-offset PREVIEW_OFFSET
Offset from start in seconds (ex. 10.4 would be 10.4
seconds)
--preview-duration PREVIEW_DURATION
Length of preview in seconds
```
- Use `--new` to specify the new chart format (Usaneko and later) which supports hold notes.
- Use `--ifs` to generate an `.ifs` file instead of a folder.
- If a preview sound file is not specified with --preview, a preview will be automatically generated.
- Automatically generated previews default to 10 seconds at the mid point of the chart.
- The preview offset and duration can be customized using `--preview-offset` and `--preview-duration` respectively.
Example: `python3 pms2bemani.py --input-np wonderingbeats/01_kouunn-n.pms --input-hp wonderingbeats/02_kouunn-h.pms --input-op wonderingbeats/03_kouunn-ex.pms --keysounds-folder wonderingbeats --name wonderingbeats_convert --ifs --new --preview-offset 10.4 --preview-duration 15`
## Credits
- ifstools (https://github.com/mon/ifstools)
- 2dxTools (https://github.com/mon/2dxTools)
- bmx2wav (http://childs.squares.net/program/bmx2wav/)
- bms2bmson-python (https://github.com/iidx/bms2bmson-python)

View File

@ -0,0 +1,303 @@
# Original: https://github.com/iidx/bms2bmson-python
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import re
import sys
import json
import operator
import traceback
__author__ = "xert*"
__version__ = "0.3"
# bmson schema version written into the "version" field of the output JSON.
__bmsonversion__ = "1.0.0"
class bms2bmson:
@staticmethod
def ToBaseX(str, shift):
a = str[0]
b = str[1]
c = 0
c += a - ord('A') + 10 if (a >= ord('A') and a <= ord('Z')) else a - ord('0')
c *= shift
c += b - ord('A') + 10 if (b >= ord('A') and b <= ord('Z')) else b - ord('0')
return c
@staticmethod
def LoadBMS(bmsfile):
bmsfilename = bmsfile
ext_formats = [".bms", ".bme", ".bml", ".pms"]
ext = os.path.splitext(os.path.basename(bmsfile))[1]
for ptr, format in enumerate(ext_formats):
if ext == format:
with open(bmsfile, "rb") as bmsdata:
return bmsdata.read()
return None
def ExportToJson(self):
bmson = {}
bmson["version"] = __bmsonversion__
bmson["info"] = self.BMSInfo
bmson["lines"] = self.lines
bmson["bpm_events"] = self.bpmnotes
bmson["stop_events"] = self.stopnotes
bmson["sound_channels"] = []
cnotes = {}
wavslen = len(self.wavHeader)
for i in range(wavslen):
cnotes[self.wavHeader[i]["ID"]] = []
for wn in self.notes:
if wn["id"] not in cnotes:
continue
n = {}
n["c"] = wn["channel"] > 30
if wn["channel"] is 1:
n["x"] = 0
else:
n["x"] = (wn["channel"]-10) % 30
n["y"] = wn["locate"]
n["l"] = wn["length"]
cnotes[wn["id"]].append(n)
for i in range(wavslen):
tempdict = {}
tempdict["name"] = self.wavHeader[i]["name"]
tempdict["notes"] = cnotes[self.wavHeader[i]["ID"]]
bmson["sound_channels"].append(tempdict)
bga = {}
bga["bga_header"] = self.bgaHeader
bga["bga_events"] = self.bbnotes
bga["layer_events"] = self.blnotes
bga["poor_events"] = self.bpnotes
bmson["bga"] = bga
self.bmson = bmson
def GetMetadata(self, bmsdata):
self.BMSInfo = { "title" : None,
"subtitle" : None,
"artist" : None,
"subartists" : None,
"genre" : None,
"mode_hint" : "beat-7k",
"chart_name" : None,
"level" : 0,
"init_bpm" : 0.0,
"total" : 100.0,
"back_image" : None,
"eyecatch_image" : None,
"banner_image" : None,
"preview_music" : None,
"resolution" : 240 }
self.wavHeader = []
self.bgaHeader = []
self.stopnum = {}
self.bpmnum = {}
tags = [ "ARTIST", "GENRE", "TITLE", "BPM", "TOTAL", "PLAYLEVEL" ]
extags = [ "WAV", "BMP", "BPM", "STOP" ]
for tag in tags:
value = re.search(b"#" + str.encode(tag) + b"\s(.+)\r", bmsdata)
if value is None:
continue
value = value.group(1)
if tag is "PLAYLEVEL" and value is not None:
self.BMSInfo["level"] = int(value)
elif tag is "BPM" and value is not None:
self.BMSInfo["init_bpm"] = float(value)
elif tag is "TOTAL" and value is not None:
self.BMSInfo["total"] = float(value)
elif (tag is "TITLE") or (tag is "GENRE") or (tag is "ARTIST"):
print(tag, value)
self.BMSInfo[tag.lower()] = str(value, 'shift-jis')
else:
pass
for tag in extags:
value = re.findall(b"#" + str.encode(tag) + b"([0-9A-Z]{2})\s(.+)\r", bmsdata)
if value is not None:
for v, parameter in value:
if tag is "WAV":
self.wavHeader.append({ "ID" : self.ToBaseX(v, 36), "name" : str(parameter, 'shift-jis') })
elif tag is "BMP":
self.bgaHeader.append({ "ID" : self.ToBaseX(v, 36), "name" : str(parameter, 'shift-jis') })
elif tag is "BPM":
self.bpmnum[self.ToBaseX(v, 36)] = float(parameter)
elif tag is "STOP":
self.stopnum[self.ToBaseX(v, 36)] = int(parameter)
return self.BMSInfo
def ReadBMSLines(self, bmsdata):
    """Parse the #MMMCC:... data lines of a BMS file into self.NotePre.

    Builds measure lengths (self.lineh), cumulative measure start pulses
    (self.lines), resolves each object's absolute pulse position, pairs
    long-note start/end objects, then hands off to self.SetNotes().
    """
    # Per-measure length in pulses; 960 = one full 4/4 measure.
    self.lineh = { i : 960 for i in range(1000) }
    # Long-note state per object id.  NOTE(review): nothing visible here
    # ever sets these to True -- confirm whether the isln pairing branch
    # below is actually reachable.
    self.isln = { i : False for i in range(4096) }
    self.lines = []
    self.NotePre = []
    self.linemax = 0
    GlobalCounter = 0  # unused
    bmslines = re.findall(b"#([0-9]{3})([0-9]{2}):(.+)\r", bmsdata)
    for measure, channel, parameter in bmslines:
        ch = int(channel)
        ms = int(measure)
        if ch >= 10 and ch < 70:
            # Swap lane digits 6<->8 and 7<->9 inside each channel group.
            c = ch % 10
            # NOTE(review): `/` is true division (Python 2 artifact), so m
            # is a float and the rebuilt ch values are floats such as
            # 12.000000000000002.  The float-keyed button mapping used by
            # the downstream converter depends on these exact values, so
            # do not "fix" this to // without retuning that mapping.
            m = ch / 10
            if c == 6: c = 8
            elif c == 7: c = 9
            elif c == 8: c = 6
            elif c == 9: c = 7
            ch = m * 10 + c
        if ch == 2:
            # Channel 02: measure length multiplier (scaled to pulses).
            self.lineh[ms] = int(960 * float(parameter))
        else:
            # Object data: a run of 2-char ids (base 16 on the BPM channel
            # 03, base 36 elsewhere); position inside the measure is mc/mm.
            paramlen = len(parameter) // 2
            for j in range(paramlen):
                paramsub = parameter[j*2:j*2+2]
                nn = self.ToBaseX(paramsub, 16) if ch == 3 else self.ToBaseX(paramsub, 36)
                if nn is not 0:  # NOTE(review): identity test; relies on CPython small-int caching
                    self.linemax = max([self.linemax, ms + 1])
                    self.NotePre.append({"x" : ch, "y" : 0, "n" : nn, "ms" : ms, "mm" : paramlen, "mc" : j})
    # Cumulative start pulse of every measure.
    y = 0
    for i in range(self.linemax + 1):
        self.lines.append({"y" : y})
        y += self.lineh[i]
    # Resolve each object's absolute pulse position within its measure.
    for i in range(len(self.NotePre)):
        ms = self.NotePre[i]["ms"]
        seq_y = (self.lines[ms+1]["y"] - self.lines[ms]["y"]) * self.NotePre[i]["mc"] / self.NotePre[i]["mm"]
        self.NotePre[i]["y"] = self.lines[ms]["y"] + seq_y
    self.NotePre = sorted(self.NotePre, key=lambda k: k['y'])
    for i in range(len(self.NotePre)):
        """
        Longnote Processor
        """
        ch = self.NotePre[i]['x']
        # Key channel with a pending long note: re-emit the previous note
        # on the same channel as the long-note start.
        if (ch > 10 and ch < 50) and self.isln[self.NotePre[i]['n']]:
            pln = i
            while pln - 1 >= 0:
                pln = pln - 1
                ch2 = self.NotePre[pln]['x']
                if ch == ch2:
                    self.NotePre.append({ "x" : self.NotePre[pln]['x'],
                                          "y" : self.NotePre[pln]['y'],
                                          "n" : self.NotePre[pln]['n'],
                                          "ms" : 0,
                                          "mm" : 0,
                                          "mc" : 0 })
                    break
        # Long-note channel 5x: find the matching end object ahead, record
        # its pulse as 'length', shift this note to the regular channel
        # (-40) and mark the end object for removal (x = 0).
        if (ch > 50 and ch < 70):
            pln = i
            while pln + 1 < len(self.NotePre):
                pln = pln + 1
                ch2 = self.NotePre[pln]['x']
                if ch == ch2:
                    self.NotePre[i]['length'] = self.NotePre[pln]['y']
                    self.NotePre[i]['x'] -= 40
                    self.NotePre[pln]['x'] = 0
                    break
    # Drop consumed long-note end markers and re-sort by pulse.
    TempNotePre = [r for r in self.NotePre if r['x'] != 0]
    self.NotePre = sorted(TempNotePre, key=lambda k: k['y'])
    self.SetNotes()
def SetNotes(self):
    """Split the preprocessed self.NotePre entries into their final event
    lists: playable notes, BGA/layer/poor events, BPM changes and STOPs.
    Entries are processed in NotePre order."""
    self.notes = []
    self.bbnotes = []
    self.blnotes = []
    self.bpnotes = []
    self.bpmnotes = []
    self.stopnotes = []
    # Channels 4/6/7 carry BGA base / poor / layer image changes.
    bga_targets = {4: self.bbnotes, 6: self.bpnotes, 7: self.blnotes}
    for entry in self.NotePre:
        lane = entry['x']
        if lane in bga_targets:
            bga_targets[lane].append({'y': entry['y'], 'id': entry['n']})
        # Channel 1 (BGM) and lanes 10..40 are playable/audible notes.
        if lane == 1 or (lane / 10 >= 1 and lane / 10 <= 4):
            self.notes.append({
                "channel": lane,
                "id": entry['n'],
                "locate": entry['y'],
                "length": entry.get('length', 0),
            })
        elif lane == 3:
            # Inline BPM change: the object id IS the BPM value.
            self.bpmnotes.append({"y": entry['y'], "v": float(entry['n'])})
        elif lane == 8:
            # Extended BPM change: look the value up in the #BPMxx table.
            self.bpmnotes.append({"y": entry['y'], "v": self.bpmnum[entry['n']]})
        elif lane == 9:
            # STOP event from the #STOPxx table.
            self.stopnotes.append({"y": entry['y'], "v": self.stopnum[entry['n']]})
def Convert(self, file):
    """Full conversion pipeline: load a BMS/PMS file, parse its header
    metadata and note lines, then build the bmson dict (self.bmson)."""
    raw_data = self.LoadBMS(file)
    self.GetMetadata(raw_data)
    self.ReadBMSLines(raw_data)
    self.ExportToJson()

Binary file not shown.

View File

@ -0,0 +1,32 @@
import os
import sys


def main():
    """Toggle the highlight flag on a range of notes in a pop'n chart file.

    Usage: <script> <chart.bin> <start_note> <end_note> <highlight_flag>

    Notes are counted from 1 in file order; the flag byte (offset 7) of
    every note event whose ordinal falls in [start_note, end_note] is
    overwritten with <highlight_flag>.  The file is rewritten in place.
    """
    if len(sys.argv) != 5:
        print("usage: %s <chart.bin> <start_note> <end_note> <highlight_flag>" % sys.argv[0])
        sys.exit(1)

    input_filename = sys.argv[1]
    start_note = int(sys.argv[2])
    end_note = int(sys.argv[3])
    highlight_flag = int(sys.argv[4])

    with open(input_filename, "rb") as infile:
        data = bytearray(infile.read())

    # Guess the record size from the first 0x100 bytes: in the 8-byte
    # format the low byte of every event code sits at offset 4 of each
    # record, so sampling every 8th byte yields a single non-zero value.
    header = data[:0x100]
    type1_count = list(set([header[i] for i in range(4, len(header), 8) if header[i] != 0]))
    is_type2 = len(type1_count) != 1
    event_size = 12 if is_type2 else 8

    events = []
    note_count = 0
    for i in range(0, len(data), event_size):
        event_data = data[i:i+event_size]
        # Byte 5 is the high byte of the little-endian event code; 0x01
        # marks a key (note) event.  Guard against a truncated tail record.
        if len(event_data) > 5 and event_data[5] == 1:
            note_count += 1
            if start_note <= note_count <= end_note:
                event_data[7] = highlight_flag
        events.append(event_data)

    with open(input_filename, "wb") as outfile:
        outfile.write(b"".join(events))


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,979 @@
import argparse
import copy
import glob
import itertools
import json
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import pydub
from bmx2bmson import bms2bmson
from PIL import Image
from lxml.etree import tostring, fromstring, XMLParser, parse as etree_parse
from lxml.builder import E
# True when running under Windows Subsystem for Linux (the kernel release
# string contains "microsoft"); selects the ./sox.exe / ./bmx2wavc.exe
# invocation style in the helpers below.
on_wsl = "microsoft" in platform.uname()[3].lower()
def insensitive_glob(pattern):
    """Case-insensitive glob: expand every letter of *pattern* into a
    [xX] character class so matching works on case-sensitive filesystems."""
    pieces = []
    for ch in pattern:
        if ch.isalpha():
            pieces.append('[%s%s]' % (ch.lower(), ch.upper()))
        else:
            pieces.append(ch)
    return glob.glob(''.join(pieces))
def calculate_timestamps(bmson, note_y):
    """Map each pulse position in *note_y* to a millisecond timestamp.

    Two passes: first every BPM-change pulse is anchored, then each
    requested pulse is timed from the BPM change most recently passed.
    960 pulses correspond to one 4/4 measure.
    Returns {pulse: rounded ms}.
    """
    timestamp_at_y = {
        0: 0,
    }

    # Calculate timestamps based on pulses
    cur_bpm = bmson['info']['init_bpm']
    cur_bpm_pulse = 0
    # NOTE(review): new_timestamps is filled but never used.
    new_timestamps = []

    # Pass 1: timestamp every BPM change; each segment runs at the
    # previous event's BPM.
    last_event = None
    for bpm_event in bmson['bpm_events']:
        last_y = 0 if not last_event else last_event['y']
        last_bpm = bmson['info']['init_bpm'] if not last_event else last_event['v']
        # Seconds per pulse at the previous BPM (4 beats / 960 pulses).
        time_per_pulse = ((60 / last_bpm) * 4) / 960
        timestamp_at_y[bpm_event['y']] = timestamp_at_y[last_y] + (bpm_event['y'] - last_y) * time_per_pulse
        new_timestamps.append(bpm_event['y'])
        last_event = bpm_event

    # Pass 2: timestamp the requested pulses in list order.
    # NOTE(review): cur_bpm/cur_bpm_pulse advance only *after* the current
    # y is computed, and BPM-change pulses already present from pass 1 can
    # be overwritten here -- the exact statement order and the caller's
    # note_y ordering are load-bearing; do not reorder.
    for y in note_y:
        time_per_pulse = ((60 / cur_bpm) * 4) / 960
        timestamp_at_y[y] = timestamp_at_y[cur_bpm_pulse] + (y - cur_bpm_pulse) * time_per_pulse
        new_timestamps.append(y)

        for bpm_event in bmson['bpm_events']:
            if y >= bpm_event['y']:
                cur_bpm = bpm_event['v']
                cur_bpm_pulse = bpm_event['y']

    # Convert seconds to whole milliseconds.
    return { k: round(timestamp_at_y[k] * 1000) for k in timestamp_at_y }
def bpm_at_offset(bmson, offset):
    """Return the BPM in effect at pulse *offset*.

    bpm_events are scanned in order; an event at exactly *offset* is not
    yet in effect (strict comparison, matching the original)."""
    result = bmson['info']['init_bpm']
    for change in bmson['bpm_events']:
        if offset > change['y']:
            result = change['v']
    return result
def generate_konami_chart_from_bmson(bmson, keysounds_list, song_total_duration=None):
# import json
# print(json.dumps(bmson, indent=4))
# exit(1)
end_timestamp = 0
end_measure = {
'y': bmson['lines'][-1]['y'] + (bmson['lines'][-1]['y'] - bmson['lines'][-2]['y'])
}
bmson['lines'].append(end_measure)
note_y = [x['y'] for x in list(itertools.chain(*[ks['notes'] for ks in bmson['sound_channels']]))]
note_y = [x.get('l', 0) for x in list(itertools.chain(*[ks['notes'] for ks in bmson['sound_channels']]))]
note_y += [x['y'] for x in bmson['lines']]
note_y += [x['y'] for x in bmson['bpm_events']]
# Timestamps for beats
for idx, _ in enumerate(bmson['lines'][:-1]):
cur_line = bmson['lines'][idx]
next_line = bmson['lines'][idx+1]
line_diff = next_line['y'] - cur_line['y']
beats_per_measure = line_diff / bmson['info']['resolution']
for i in range(int(beats_per_measure)):
note_y.append(cur_line['y'] + (line_diff / beats_per_measure) * i)
end_timestamp = cur_line['y'] + (line_diff / beats_per_measure) * (i + 1)
real_note_y = sorted(list(set([float(x) for x in note_y])))
for i in range(0, round(end_timestamp)):
# TODO: Slow but working
# This is required because the code to bump the timestamp when a keysound is played and loaded at the same time
# uses non-existing timestamps
note_y.append(i)
note_y = sorted(list(set([float(x) for x in note_y])))
# Pick up any left over y positions not found
for sound in bmson['sound_channels']:
for note in sound['notes']:
if note['y'] not in note_y:
note_y.append(note['y'])
real_timestamps = calculate_timestamps(bmson, real_note_y)
timestamps = calculate_timestamps(bmson, note_y)
events = []
# Measure line events
last_measure_timestamp = 0
for line_event in bmson['lines']:
events.append({
'name': "measure",
'timestamp': timestamps[line_event['y']]
})
last_measure_timestamp = timestamps[line_event['y']]
# Beat line events
for idx, _ in enumerate(bmson['lines'][:-1]):
cur_line = bmson['lines'][idx]
next_line = bmson['lines'][idx+1]
line_diff = next_line['y'] - cur_line['y']
beats_per_measure = line_diff / bmson['info']['resolution']
for i in range(int(beats_per_measure)):
y = cur_line['y'] + (line_diff / beats_per_measure) * i
events.append({
'name': "beat",
'timestamp': timestamps[y]
})
# BPM events
events.append({
'name': "bpm",
'timestamp': 0,
'bpm': round(bmson['info']['init_bpm'])
})
for bpm_event in bmson['bpm_events']:
events.append({
'name': "bpm",
'timestamp': timestamps[bpm_event['y']],
'bpm': round(bpm_event['v'])
})
# Time signature event
# TODO: When exactly would this not be 4/4? pop'n 8 egypt has 3/4 or 4/3 (not sure which) but does it make any difference in-game?
events.append({
'name': "timesig",
'timestamp': 0,
'top': 4,
'bottom': 4,
})
# End event
events.append({
'name': "end",
'timestamp': timestamps[end_timestamp] if not song_total_duration else song_total_duration,
})
# Timing window stuff (How does this translate exactly? Frames?)
timings = [
0x76, # Early bad
0x7a, # Early good
0x7e, # Early great
0x84, # Late great
0x88, # Late good
0x8c, # Late bad
]
for idx, timing in enumerate(timings):
events.append({
'name': "timing",
'timestamp': 0,
'timing': timing,
'timing_slot': idx
})
# Unknown event that all charts seem to have
events.append({
'name': "unk",
'timestamp': 0,
'value': 10,
})
# Key events
button_mapping = {
2.0: 0,
4.0: 1,
6.0: 2,
8.0: 3,
10.0: 4,
14.0: 5,
16.0: 6,
18.0: 7,
20.0: 8,
}
# The note events need to be in order or else the cur_button_loaded state won't be correct
note_events = []
for sound in bmson['sound_channels']:
sound['name'] = sound['name'].lower()
for note in sound['notes']:
note_events.append((sound, note))
note_events = sorted(note_events, key=lambda x:x[1]['y'])
load_latest_window_offset = ((timings[1] - 128) * 16) - 16 # Early good
load_latest_window_offset += -2 if load_latest_window_offset < 0 else 2
load_window_bump_offset = ((timings[3] - 128) * 16) - 16 # Late great
load_window_bump_offset += -2 if load_window_bump_offset < 0 else 2
note_load_events = []
cur_button_loaded = {}
initial_keysounds = { k: 0 for k in range(0, 9) }
for sound, note in note_events:
if note['x'] not in button_mapping:
if note['x'] != 0:
print("Unknown button!", note)
exit(1)
else:
events.append({
'name': "sample2",
'timestamp': timestamps[note['y']],
'value': keysounds_list.index(sound['name']) + 1,
'key': 8, # TODO: What is this supposed to be exactly?
})
else:
events.append({
'name': "key",
'timestamp': timestamps[note['y']],
'key': button_mapping[note['x']],
'length': timestamps[note['l']] - timestamps[note['y']] if note.get('l', 0) > 0 else 0,
'_note': note,
'_filename': sound['name']
})
if button_mapping[note['x']] not in cur_button_loaded or sound['name'] != cur_button_loaded[button_mapping[note['x']]]:
# Find suitable timestamp to load keysound
# There is a specific sweet spot window
load_earliest_window = round(timestamps[note['y']] - (30000 / bpm_at_offset(bmson, note['y']))) # 1/2 of a beat of a measure at the current BPM
load_latest_window = round(timestamps[note['y']] + load_latest_window_offset) # At the latest, load the keysound before the earliest part of an early good window
candidate_timestamp = 0
if load_earliest_window >= 0:
for k in timestamps:
if timestamps[k] >= load_earliest_window and k <= note['y']:
candidate_timestamp = k
break
for event in events:
if event.get('_note', None) == note:
continue
if event['timestamp'] >= timestamps[candidate_timestamp] and event['name'] == "key" and event['key'] == button_mapping[note['x']]:
# Bump timestamp when it would try to load a keysound in the same slot that's being played at the same timestamp
target = timestamps[candidate_timestamp] + load_window_bump_offset
while timestamps[candidate_timestamp] < target:
candidate_timestamp += 1
if candidate_timestamp == 0:
initial_keysounds[note['key']] = keysounds_list.index(sound['name']) + 1
else:
if timestamps[candidate_timestamp] >= load_latest_window:
diff = timestamps[candidate_timestamp] - load_latest_window
print("Potential issue with keysound load timing detected (%d ms off from sweet spot range):" % (diff))
print(timestamps[candidate_timestamp], load_latest_window)
print(sound['name'], note)
if diff < 50:
print("This has a high possibility of not loading this keysound in time for the button press")
else:
print("This may not cause issues in-game")
print()
events.append({
'name': "sample",
'timestamp': timestamps[candidate_timestamp],
'value': keysounds_list.index(sound['name']) + 1,
'key': button_mapping[note['x']],
})
cur_button_loaded[button_mapping[note['x']]] = sound['name']
# Initialize keysound samples
for i in range(0, 9):
events.append({
'name': "sample",
'timestamp': 0,
'value': initial_keysounds[i],
'key': i,
})
# Poor way of doing this, but I want the generated charts to be as close to official as possible including ordering of events
events_by_timestamp = {}
for event in sorted(events, key=lambda x:x['timestamp']):
if event['timestamp'] not in events_by_timestamp:
events_by_timestamp[event['timestamp']] = []
events_by_timestamp[event['timestamp']].append(event)
event_order = [
"bpm",
"timesig",
"unk",
"timing",
"key",
"sample",
"sample2",
"measure",
"beat",
"end",
]
events_ordered = []
for timestamp in events_by_timestamp:
for event_name in event_order:
events_by_name = []
for event in events_by_timestamp[timestamp]:
if event['name'] == event_name:
events_by_name.append(event)
events_ordered += sorted(events_by_name, key=lambda x:x.get('key', 0))
return events_ordered
def bmson_has_long_notes(bmson):
    """True if any note in any sound channel carries a positive length
    ('l' field), i.e. the chart uses hold notes."""
    return any(
        note.get('l', 0) > 0
        for channel in bmson['sound_channels']
        for note in channel['notes']
    )
def write_chart(events, output_filename, new_format):
    """Serialize chart events to the binary pop'n chart format.

    Each record: u32 timestamp, u16 event code, 2 payload bytes, plus a
    u32 hold length in the new format.  Events with negative timestamps
    are dropped.
    """
    opcode = {
        "key": 0x0145,
        "sample": 0x0245,
        "unk": 0x0345,
        "bpm": 0x0445,
        "timesig": 0x0545,
        "end": 0x0645,
        "sample2": 0x0745,
        "timing": 0x0845,
        "measure": 0x0a00,
        "beat": 0x0b00,
    }
    with open(output_filename, "wb") as outfile:
        for event in events:
            if event['timestamp'] < 0:
                continue

            name = event['name']
            outfile.write(event['timestamp'].to_bytes(4, 'little'))
            outfile.write(opcode[name].to_bytes(2, 'little'))

            if name == "bpm":
                outfile.write(event['bpm'].to_bytes(2, 'little'))
            elif name == "timesig":
                outfile.write(event['bottom'].to_bytes(1, 'little'))
                outfile.write(event['top'].to_bytes(1, 'little'))
            elif name == "timing":
                outfile.write((event['timing'] | (event['timing_slot'] << 12)).to_bytes(2, 'little'))
            elif name == "unk":
                outfile.write(event['value'].to_bytes(2, 'little'))
            elif name == "key":
                outfile.write(event['key'].to_bytes(1, 'little'))
                # Flag 4 marks a hold note in the new chart format.
                hold_flag = 4 if new_format and event['length'] != 0 else 0
                outfile.write(hold_flag.to_bytes(1, 'little'))
            elif name in ("sample", "sample2"):
                outfile.write((event['value'] | (event['key'] << 12)).to_bytes(2, 'little'))
            elif name in ("measure", "beat", "end"):
                outfile.write((0).to_bytes(2, 'little'))
            else:
                print("Unknown name:", event['name'])
                exit(1)

            if new_format:
                # New format: trailing u32 hold length on every record.
                if name in ['key'] and event['length'] > 0:
                    outfile.write(event['length'].to_bytes(4, 'little'))
                else:
                    outfile.write((0).to_bytes(4, 'little'))
def generate_wav(input_filename):
    """Convert an audio file to an MS-ADPCM wav via sox.

    Returns the path of the generated temp file under tmp/ (the caller is
    expected to check it exists -- sox failures are not detected here).
    """
    new_filename = os.path.join("tmp", next(tempfile._get_candidate_names()) + ".wav")

    # Prefer a bundled sox.exe when running under WSL (Windows binaries
    # are runnable there); otherwise fall back to `sox` on the PATH.
    # Fixed: the fallback invoked "sox.exe", which can never work on
    # Linux/macOS; plain "sox" also resolves to sox.exe on Windows.
    if os.path.exists("sox.exe") and on_wsl:
        os.system("""./sox.exe -G -S "%s" -e ms-adpcm "%s" """ % (input_filename, new_filename))
    else:
        os.system("""sox -G -S "%s" -e ms-adpcm "%s" """ % (input_filename, new_filename))

    return new_filename
def generate_2dx(input_filenames, output_filename):
    """Pack wav files into a .2dx keysound archive (based on mon's 2dxTools).

    None entries keep their slot in the offset table but contribute no
    data, matching the original tool's behavior.
    """
    title = os.path.splitext(os.path.basename(output_filename))[0][:16].encode('ascii')
    title = title.ljust(16, b"\0")

    with open(output_filename, "wb") as outfile:
        # Archive header: 16-byte title, offset of first file, file count,
        # 0x30 bytes of padding.
        cursor = 0x48 + len(input_filenames) * 4
        outfile.write(title)
        outfile.write(cursor.to_bytes(4, 'little'))
        outfile.write(len(input_filenames).to_bytes(4, 'little'))
        outfile.write(b"\0" * 0x30)

        # Offset table: one u32 per slot; empty slots repeat the running
        # offset without advancing it.
        for filename in input_filenames:
            outfile.write(cursor.to_bytes(4, 'little'))
            if filename:
                cursor += os.path.getsize(filename) + 24  # 24 = per-file header size

        for filename in input_filenames:
            if not filename:
                continue

            with open(filename, "rb") as infile:
                payload = infile.read()

            outfile.write(b"2DX9")
            outfile.write((24).to_bytes(4, 'little'))          # header size
            outfile.write(len(payload).to_bytes(4, 'little'))  # wave data size
            outfile.write((0x3231).to_bytes(2, 'little'))      # always 0x3231
            outfile.write((0xffff).to_bytes(2, 'little'))      # track id: -1 for previews, 0-7 for song variants
            outfile.write((64).to_bytes(2, 'little'))          # always 64 (40 only for the select "click")
            outfile.write((1).to_bytes(2, 'little'))           # attenuation, 0-127
            outfile.write((0).to_bytes(4, 'little'))           # loop point (sample * 4)
            outfile.write(payload)
def export_2dx(keysounds, output_filename):
    """Convert every keysound to MS-ADPCM and bundle them into a .2dx file.

    None entries become empty archive slots; a named file that cannot be
    found (or fails conversion) aborts the program.  Intermediate wavs are
    always cleaned up.
    """
    os.makedirs("tmp", exist_ok=True)

    converted = [None] * len(keysounds)
    try:
        for slot, source in enumerate(keysounds):
            if not source or not os.path.exists(source):
                if source:
                    print("Couldn't find", source)
                    exit(1)
                continue

            wav_path = generate_wav(source)
            if not os.path.exists(wav_path):
                # sox did not produce output; treat as a missing keysound.
                print("Couldn't find", source)
                exit(1)
            converted[slot] = wav_path

        generate_2dx(converted, output_filename)

    finally:
        for wav_path in converted:
            if wav_path and os.path.exists(wav_path):
                os.remove(wav_path)
def generate_render(input_filename, output_filename):
    """Render a BMS file to wav using bmx2wavc.exe (must be in the cwd).

    Aborts the program when the tool is missing."""
    if not os.path.exists("bmx2wavc.exe"):
        print("bmx2wavc.exe is required to generate previews")
        exit(1)

    # Under WSL the Windows binary must be invoked with an explicit ./ path.
    prefix = "./" if on_wsl else ""
    os.system("""%sbmx2wavc.exe "%s" "%s" """ % (prefix, input_filename, output_filename))
def get_duration(input_filename):
    """Length of an audio file in milliseconds, or None if it is missing."""
    if not os.path.exists(input_filename):
        return None
    return len(pydub.AudioSegment.from_file(input_filename))
def generate_preview(input_filename, output_filename, offset, duration):
    """Cut a preview clip out of an audio file and write it as wav.

    offset/duration are in seconds; a negative offset means "start at the
    middle of the song".  The clip gets a 500 ms fade-out.
    Returns output_filename.
    """
    audio = pydub.AudioSegment.from_file(input_filename)

    start = offset
    if start < 0:
        # Default the preview to the track midpoint (pydub lengths are in
        # ms, while start is kept in seconds like the offset argument).
        start = len(audio) / 2 / 1000

    clip = audio[start * 1000 : (start + duration) * 1000]
    clip = clip.fade_out(500)
    clip.export(output_filename, format="wav")

    return output_filename
def get_real_keysound_filename(input_filename, keysounds_folder):
    """Resolve a keysound reference to an actual file on disk.

    Tries the exact path first, then a case-insensitive search allowing
    any extension.  None input yields None; an unresolvable name aborts
    the program.
    """
    if not input_filename:
        return None

    exact = os.path.join(keysounds_folder, input_filename)
    if os.path.exists(exact):
        # The file exists as referenced.
        return exact

    # Fall back to matching the basename with any extension, ignoring case.
    pattern = os.path.join(keysounds_folder, "%s.*" % (os.path.splitext(input_filename)[0]))
    matches = insensitive_glob(pattern)
    if matches:
        return matches[0]

    print("Couldn't find", input_filename)
    exit(1)
def create_banner(output_path, musicid, banner_filename):
    """Build the song banner texture folder (kc_XXXX) for IFS packing.

    The banner must be exactly 244x58; its edge pixels are duplicated to
    246x60 so texture filtering does not bleed at the borders.  Returns
    the generated folder name.
    """
    banner_image = Image.open(banner_filename)
    if banner_image.size != (244, 58):
        print("Banner must be 244x58! Found", banner_image.size)
        exit(1)
    banner_name = "kc_%04d" % (musicid)
    banner_output_folder = os.path.join(output_path, banner_name)
    os.makedirs(os.path.join(banner_output_folder, "tex"), exist_ok=True)
    # Marker files expected by ifstools / the game's IFS format.
    open(os.path.join(banner_output_folder, "magic"), "wb").write(b"NGPF")
    open(os.path.join(banner_output_folder, "cversion"), "wb").write(b"1.3.72\0")
    # Texture list placing the padded image inside a 256x64 texture.
    # NOTE(review): the uvrect/imgrect constants look tuned for the padded
    # 246x60 image -- confirm before changing.
    banner_xml = E.texturelist(
        E.texture(
            E.size(
                "256 64",
                __type="2u16",
            ),
            E.image(
                E.uvrect(
                    "2 490 2 118",
                    __type="4u16"
                ),
                E.imgrect(
                    "0 492 0 120",
                    __type="4u16"
                ),
                name=banner_name
            ),
            format="argb8888rev",
            mag_filter="nearest",
            min_filter="nearest",
            name="tex000",
            wrap_s="clamp",
            wrap_t="clamp",
        ),
        compress="avslz",
    )
    tex_path = os.path.join(banner_output_folder, "tex")
    open(os.path.join(tex_path, "texturelist.xml"), "wb").write(tostring(banner_xml, pretty_print=True, method='xml', encoding='utf-8', xml_declaration=True))
    # Always true after the size check above; pads by duplicating edges.
    if banner_image.size == (244, 58):
        # Duplicate the edge pixels
        new_banner_image = Image.new('RGBA', (banner_image.width + 2, banner_image.height + 2))
        new_banner_image.paste(banner_image, (1, 1))
        new_banner_image.paste(banner_image.crop((0, 0, banner_image.width, 1)), (1, 0)) # Top
        new_banner_image.paste(banner_image.crop((0, banner_image.height - 1, banner_image.width, banner_image.height)), (1, banner_image.height + 1)) # Bottom
        # new_banner_image.paste(banner_image.crop((1, 0, 2, banner_image.height)), (0, 1)) # Left
        new_banner_image.paste(banner_image.crop((banner_image.width - 1, 0, banner_image.width, banner_image.height)), (banner_image.width + 1, 1)) # Right
        banner_image = new_banner_image
    if banner_image.size not in [(246, 60)]:
        print("Unknown banner size", banner_filename, banner_image.size)
        exit(1)
    banner_image.save(os.path.join(tex_path, banner_name + ".png"))
    return banner_name
def create_bg(output_path, musicid, bg_filename):
    """Build the song background texture folder (bg_XXXX) for IFS packing.

    The background must be exactly 128x256; its edge pixels are duplicated
    to 130x258 so texture filtering does not bleed at the borders.
    Returns the generated folder name.
    """
    bg_image = Image.open(bg_filename)
    if bg_image.size != (128, 256):
        print("Background must be 128x256! Found", bg_image.size)
        exit(1)
    bg_name = "bg_%04d" % (musicid)
    bg_output_folder = os.path.join(output_path, bg_name)
    os.makedirs(os.path.join(bg_output_folder, "tex"), exist_ok=True)
    # Marker files expected by ifstools / the game's IFS format.
    open(os.path.join(bg_output_folder, "magic"), "wb").write(b"NGPF")
    open(os.path.join(bg_output_folder, "cversion"), "wb").write(b"1.3.72\0")
    # Texture list placing the padded image inside a 256x512 texture.
    # NOTE(review): the uvrect/imgrect constants look tuned for the padded
    # 130x258 image -- confirm before changing.
    bg_xml = E.texturelist(
        E.texture(
            E.size(
                "256 512",
                __type="2u16",
            ),
            E.image(
                E.uvrect(
                    "2 258 2 514",
                    __type="4u16"
                ),
                E.imgrect(
                    "0 260 0 516",
                    __type="4u16"
                ),
                name=bg_name
            ),
            format="argb8888rev",
            mag_filter="nearest",
            min_filter="nearest",
            name="tex000",
            wrap_s="clamp",
            wrap_t="clamp",
        ),
        compress="avslz",
    )
    tex_path = os.path.join(bg_output_folder, "tex")
    open(os.path.join(tex_path, "texturelist.xml"), "wb").write(tostring(bg_xml, pretty_print=True, method='xml', encoding='utf-8', xml_declaration=True))
    # Always true after the size check above; pads by duplicating edges.
    if bg_image.size == (128, 256):
        # Duplicate the edge pixels
        new_bg_image = Image.new('RGBA', (bg_image.width + 2, bg_image.height + 2))
        new_bg_image.paste(bg_image, (1, 1))
        new_bg_image.paste(bg_image.crop((0, 0, bg_image.width, 1)), (1, 0)) # Top
        new_bg_image.paste(bg_image.crop((0, bg_image.height - 1, bg_image.width, bg_image.height)), (1, bg_image.height + 1)) # Bottom
        # new_bg_image.paste(bg_image.crop((1, 0, 2, bg_image.height)), (0, 1)) # Left
        new_bg_image.paste(bg_image.crop((bg_image.width - 1, 0, bg_image.width, bg_image.height)), (bg_image.width + 1, 1)) # Right
        bg_image = new_bg_image
    if bg_image.size not in [(130, 258)]:
        print("Unknown background size", bg_filename, bg_image.size)
        exit(1)
    bg_image.save(os.path.join(tex_path, bg_name + ".png"))
    return bg_name
def create_hariai(output_path, musicid, hariai_filename):
    """Build the hariai (character/jacket portrait) texture folder (ha_XXXX).

    The image must be exactly 248x320; its edge pixels are duplicated to
    250x322 so texture filtering does not bleed at the borders.  Returns
    the generated folder name.
    """
    hariai_image = Image.open(hariai_filename)
    if hariai_image.size != (248, 320):
        print("hariai must be 248x320! Found", hariai_image.size)
        exit(1)
    hariai_name = "ha_%04d" % (musicid)
    hariai_output_folder = os.path.join(output_path, hariai_name)
    os.makedirs(os.path.join(hariai_output_folder, "tex"), exist_ok=True)
    # Marker files expected by ifstools / the game's IFS format.
    open(os.path.join(hariai_output_folder, "magic"), "wb").write(b"NGPF")
    open(os.path.join(hariai_output_folder, "cversion"), "wb").write(b"1.3.72\0")
    # Texture list placing the padded image inside a 256x512 texture.
    # NOTE(review): the uvrect/imgrect constants look tuned for the padded
    # 250x322 image -- confirm before changing.
    hariai_xml = E.texturelist(
        E.texture(
            E.size(
                "256 512",
                __type="2u16",
            ),
            E.image(
                E.uvrect(
                    "2 498 2 642",
                    __type="4u16"
                ),
                E.imgrect(
                    "0 500 0 644",
                    __type="4u16"
                ),
                name=hariai_name
            ),
            format="argb8888rev",
            mag_filter="nearest",
            min_filter="nearest",
            name="tex000",
            wrap_s="clamp",
            wrap_t="clamp",
        ),
        compress="avslz",
    )
    tex_path = os.path.join(hariai_output_folder, "tex")
    open(os.path.join(tex_path, "texturelist.xml"), "wb").write(tostring(hariai_xml, pretty_print=True, method='xml', encoding='utf-8', xml_declaration=True))
    # Always true after the size check above; pads by duplicating edges.
    if hariai_image.size == (248, 320):
        # Duplicate the edge pixels
        new_hariai_image = Image.new('RGBA', (hariai_image.width + 2, hariai_image.height + 2))
        new_hariai_image.paste(hariai_image, (1, 1))
        new_hariai_image.paste(hariai_image.crop((0, 0, hariai_image.width, 1)), (1, 0)) # Top
        new_hariai_image.paste(hariai_image.crop((0, hariai_image.height - 1, hariai_image.width, hariai_image.height)), (1, hariai_image.height + 1)) # Bottom
        # new_hariai_image.paste(hariai_image.crop((1, 0, 2, hariai_image.height)), (0, 1)) # Left
        new_hariai_image.paste(hariai_image.crop((hariai_image.width - 1, 0, hariai_image.width, hariai_image.height)), (hariai_image.width + 1, 1)) # Right
        hariai_image = new_hariai_image
    if hariai_image.size not in [(250, 322)]:
        print("Unknown hariai size", hariai_filename, hariai_image.size)
        exit(1)
    hariai_image.save(os.path.join(tex_path, hariai_name + ".png"))
    return hariai_name
if __name__ == "__main__":
    # CLI driver: converts up to five BMS/PMS charts (bp/ep/np/hp/op) into
    # a popnhax custom pack: binary charts, keysound/preview .2dx archives,
    # optional textures and a merged custom_musicdb.xml entry.
    parser = argparse.ArgumentParser()
    difficulties = ['bp', 'ep', 'np', 'hp', 'op']
    for difficulty in difficulties:
        parser.add_argument('--input-%s' % difficulty, help='Input file (%s)' % difficulty.upper(), default=None)
    parser.add_argument('--output', help='Output folder', default="output")
    parser.add_argument('--name', help='Base name used for output', default=None, required=True)
    parser.add_argument('--musicid', help='Music ID used for the database file', required=True, type=int)
    parser.add_argument('--keysounds-folder', help='Input folder containing keysounds', default=None, required=True)
    parser.add_argument('--preview', help='Input preview file (optional, overrides preview generation code)', default=None)
    parser.add_argument('--new', help='New chart format which supports hold notes', default=False, action='store_true')
    parser.add_argument('--banner', help='Banner image (optional, must be 244x58)', default=None)
    parser.add_argument('--bg', help='Background image (optional, must be 128x256)', default=None)
    parser.add_argument('--hariai', help='Hariai image (optional, must be 248x320)', default=None)
    parser.add_argument('--metadata-fw-title', help='Fullwidth music title for database', default=None)
    parser.add_argument('--metadata-fw-artist', help='Fullwidth music artist for database', default=None)
    parser.add_argument('--metadata-fw-genre', help='Fullwidth music genre for database', default=None)
    parser.add_argument('--metadata-title', help='Music title for database', default=None)
    parser.add_argument('--metadata-artist', help='Music artist for database', default=None)
    parser.add_argument('--metadata-genre', help='Music genre for database', default=None)
    parser.add_argument('--metadata-chara1', help='Chara1 for database', default=None)
    parser.add_argument('--metadata-chara2', help='Chara2 for database', default=None)
    parser.add_argument('--metadata-has-battle-hyper', help='Battle Hyper flag for database', default=False, action='store_true')
    parser.add_argument('--metadata-hariai-is-jacket', help='Jacket mask flag for database', default=False, action='store_true')
    parser.add_argument('--metadata-folder', help='Folder entry for database', default=0, type=int)
    parser.add_argument('--metadata-categories', help='Categories entry for database', default=0, type=int)
    parser.add_argument('--metadata-cs-version', help='CS version entry for database', default=0, type=int)
    parser.add_argument('--metadata-mask', help='Base mask value for database', default=0, type=int)
    parser.add_argument('--metadata-chara-x', help='Chara X entry for database', default=0, type=int)
    parser.add_argument('--metadata-chara-y', help='Chara Y entry for database', default=0, type=int)
    # NOTE(review): these two options only exist when bmx2wavc.exe is
    # present, but args.preview_offset/args.preview_duration are read below
    # whenever --preview is omitted; without bmx2wavc.exe that path raises
    # AttributeError -- confirm intended.
    if os.path.exists("bmx2wavc.exe"):
        parser.add_argument('--preview-offset', help='Offset from start in seconds (ex. 10.4 would be 10.4 seconds)', default=-1, type=float)
        parser.add_argument('--preview-duration', help='Length of preview in seconds', default=10, type=float)
    args = parser.parse_args()
    args_vars = vars(args)
    if args.musicid < 4000:
        # IDs below 4000 are reserved for official songs.
        print("Music ID must be >= 4000")
        exit(1)
    output_path = os.path.join(args.output, args.name)
    mask = args.metadata_mask
    charts_xml = []
    # Generate list of keysounds based on input charts
    bms_charts = []
    chart_filenames = []
    battle_chart = None
    for difficulty in difficulties:
        if not args_vars.get('input_%s' % difficulty, None):
            continue
        output_filename = os.path.join(output_path, "%s_%s.bin" % (args.name, difficulty))
        bms = bms2bmson()
        bms.Convert(args_vars['input_%s' % difficulty])
        bms_charts.append((bms, output_filename))
        has_hold_notes = bmson_has_long_notes(bms.bmson)
        optional = []
        if has_hold_notes or args.new:
            optional.append(
                E.force_new_chart_format("1", __type="u32")
            )
            args.new = True # In case the song has long notes and the user forgot to set the new flag, upgrade it automatically
        chart = E.chart(
            E.folder("custom", __type="str"),
            E.filename(args.name, __type="str"),
            E.audio_param1("0", __type="s32"),
            E.audio_param2("0", __type="s32"),
            E.audio_param3("0", __type="s32"),
            E.audio_param4("0", __type="s32"),
            E.file_type("0", __type="u32"),
            E.used_keys("0", __type="u16"),
            E.diff("1", __type="u8"),
            E.hold_flag("1" if has_hold_notes else "0", __type="u8"),
            idx=str(difficulty),
            *optional
        )
        charts_xml.append(chart)
        if difficulty == "bp":
            battle_chart = chart
        chart_filenames.append(args_vars['input_%s' % difficulty])
    # Battle hyper reuses the battle chart entry under a different idx.
    if args.metadata_has_battle_hyper and battle_chart is not None:
        chart = copy.deepcopy(battle_chart)
        chart.set('idx', 'bp_h')
        charts_xml.append(chart)
    # Generate list of keysounds used in the input charts to create the keysound .2dx
    keysounds_list = []
    for bms, _ in bms_charts:
        for keysound in sorted(bms.wavHeader, key=lambda x:x['ID']):
            # NOTE(review): membership is checked against the original-case
            # name but the lowercased name is stored, so a mixed-case
            # keysound shared between charts could be appended twice -- verify.
            if keysound['name'] not in keysounds_list:
                keysounds_list.append(keysound['name'].lower())
    # Render chart so it can be used to find the true length of the song and also later for preview generation if required
    render_filename = os.path.join("tmp", "%s_full.wav" % args.name)
    generate_render(chart_filenames[-1], render_filename)
    song_total_duration = get_duration(render_filename)
    os.makedirs(output_path, exist_ok=True)
    for bms, output_filename in bms_charts:
        write_chart(generate_konami_chart_from_bmson(bms.bmson, keysounds_list, song_total_duration), output_filename, new_format=args.new)
    # Resolve keysound names to real files and pack them into the .2dx archive.
    real_keysound_filenames = [(x, get_real_keysound_filename(x, args.keysounds_folder)) for x in keysounds_list]
    export_2dx([x[1] for x in real_keysound_filenames], os.path.join(output_path, "%s.2dx" % args.name))
    if args.preview:
        # Create a _pre.2dx if a preview is specified
        export_2dx([args.preview], os.path.join(output_path, "%s_pre.2dx" % args.name))
    else:
        # Otherwise cut a preview clip out of the rendered song.
        preview_filename = os.path.join("tmp", "%s_pre.wav" % args.name)
        if not render_filename or not os.path.exists(render_filename):
            render_filename = os.path.join("tmp", "%s_full.wav" % args.name)
            generate_render(chart_filenames[-1], render_filename)
        generate_preview(render_filename, preview_filename, args.preview_offset, args.preview_duration)
        export_2dx([preview_filename], os.path.join(output_path, "%s_pre.2dx" % args.name))
        os.unlink(preview_filename)
    if os.path.exists(render_filename):
        os.unlink(render_filename)
    tex_files = {}
    if args.banner:
        # Create banner folder
        tex_files['kc_mod'] = create_banner(output_path, args.musicid, args.banner)
    if args.hariai:
        # Create hariai folder
        tex_files['ha_mod'] = create_hariai(output_path, args.musicid, args.hariai)
        mask |= 0x00800000 # Required for songs that show a hariai image on the music selection screen
    if args.bg:
        # Create background folder
        tex_files['bg_mod'] = create_bg(output_path, args.musicid, args.bg)
    if args.metadata_hariai_is_jacket:
        mask |= 0x00000020 # The alternate hariai image (set by using 0x800000) is a song jacket instead of a character portrait
    # Database entry for this song.
    xml = E.music(
        E.fw_genre(args.metadata_fw_genre if args.metadata_fw_genre else "", __type="str"),
        E.fw_title(args.metadata_fw_title if args.metadata_fw_title else "", __type="str"),
        E.fw_artist(args.metadata_fw_artist if args.metadata_fw_artist else "", __type="str"),
        E.genre(args.metadata_genre if args.metadata_genre else "", __type="str"),
        E.title(args.metadata_title if args.metadata_title else "", __type="str"),
        E.artist(args.metadata_artist if args.metadata_artist else "", __type="str"),
        E.chara1(args.metadata_chara1 if args.metadata_chara1 else "", __type="str"),
        E.chara2(args.metadata_chara2 if args.metadata_chara2 else "", __type="str"),
        E.mask(str(mask), __type="u32"),
        E.folder(str(args.metadata_folder), __type="u32"),
        E.cs_version(str(args.metadata_cs_version), __type="u32"),
        E.categories(str(args.metadata_categories), __type="u32"),
        E.charts(*charts_xml),
        E.ha(tex_files.get('ha_mod', ""), __type="str"),
        E.chara_x(str(args.metadata_chara_x), __type="u32"),
        E.chara_y(str(args.metadata_chara_y), __type="u32"),
        E.unk1("0 0 0 0 0 0 36 0 0 59 77 0 0 0 0 134 0 0 68 67 222 0 0 0 0 0 0 0 0 0 0 0", __type="u16", __count="32"),
        E.display_bpm(" ".join([str(x) for x in [0] * 12]), __type="u16", __count="12"),
        id=str(args.musicid)
    )
    db_path = os.path.join(args.output, "db")
    os.makedirs(db_path, exist_ok=True)
    output_xml_path = os.path.join(db_path, "custom_musicdb.xml")
    # Try to read in existing database and merge if possible
    if os.path.exists(output_xml_path):
        print("Merging databases")
        xml_full = etree_parse(output_xml_path, XMLParser(remove_blank_text=True)).getroot()
        # Replace any existing entry carrying the same (zero-padded) id.
        remove = []
        musicid_str = "%04d" % args.musicid
        for entry in xml_full.findall('music'):
            if entry.get('id') == musicid_str:
                remove.append(entry)
        for entry in remove:
            xml_full.remove(entry)
        xml_full.append(xml)
        xml = xml_full
    else:
        xml = E.database(
            xml
        )
    # Write as cp932 first, then re-read and pretty-print, relabeling the
    # declaration as shift-jis (the encoding the game expects).
    open(output_xml_path, "wb").write(tostring(xml, method='xml', encoding='cp932', xml_declaration=True))
    xml = etree_parse(output_xml_path, XMLParser(remove_blank_text=True)).getroot()
    open(output_xml_path, "wb").write(tostring(xml, pretty_print=True, method='xml', encoding='cp932', xml_declaration=True).replace(b"cp932", b"shift-jis"))
    # Create .ifs instead of folder
    target_output_path = output_path
    for path in tex_files:
        folder = tex_files[path]
        target_output_path = os.path.join(args.output, "data", "tex", "system", path)
        target_path = os.path.join(output_path, folder)
        os.makedirs(target_output_path, exist_ok=True)
        subprocess.call('"%s" -c "from ifstools import ifstools; ifstools.main()" -s --no-cache -y "%s" -o "%s"' % (sys.executable, target_path, target_output_path), shell=True)
        shutil.rmtree(target_path)
    target_output_path = os.path.join(args.output, "data", "sd", "custom")
    os.makedirs(target_output_path, exist_ok=True)
    subprocess.call('"%s" -c "from ifstools import ifstools; ifstools.main()" -s --no-cache -y "%s" -o "%s"' % (sys.executable, output_path, target_output_path), shell=True)
    shutil.rmtree(output_path)

View File

@ -0,0 +1,2 @@
ifstools==1.13
pydub==0.23.1