This commit is contained in:
Cainan 2024-06-16 19:13:56 +01:00
parent f79bf4de51
commit c6ec0af7a0
64 changed files with 323127 additions and 0 deletions

View File

@ -0,0 +1,16 @@
# Taiko no Tatsujin - Font to Atlas Texture
Usage: font_to_atlas.py [-h] ttf_path font_size image_width image_height {ascii,unicode} output_name
Convert TTF font to texture and XML
Positional arguments:
ttf_path Path to the TTF font file
font_size Font size
image_width Width of the texture image
image_height Height of the texture image
{ascii,unicode} Character range
output_name Output name (e.g., en_64)
Should support any NU Library Taiko game which pairs a texture atlas and xml file for its font. (NS1, PS4 and Arcade).
Possibly may also support non-Taiko NU Library titles also.

View File

@ -0,0 +1,26 @@
# Taiko no Tatsujin - Song Conversion Tool
Python based tool that can convert official songs over to some Taiko no Tatsujin games.
Supported Titles:
Nintendo Switch Version / Drum 'n' Fun v1.4.13 (Nintendo Switch)
Drum Session (Any Update) (PlayStation 4)
Pop Tap Beat (Any Update) (iOS/MacOS/Apple TV)
Tool has over 2000 songs to choose from, including the ability to listen to each song's preview too.
There are three options to sort songs by: ID (A-Z), Song Name (A-Z) and Genre.
This is still a work in-progress, so please report any issues found to me, along with suggestions for features or game support.
Prerequisites:
Python 3.12.3 or newer
tkinter installed through pip `pip install tk`
cryptography installed through pip `pip install cryptography`
pydub installed through pip `pip install pydub`
ffplay installed in `path`.
Game Data properly converted to the format this tool expects, stored in a folder called `data`.
Due to copyright reasons, etc. no song data will be provided with this tool, however template data can be found within the `data` folder, which should give an idea of what the tool requires.
Due to this tool relying on some Windows executables, it currently only supports Windows.
I will be looking into getting it running on Unix-based operating systems. (Linux/macOS)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,3 @@
{
"max_concurrent": 25
}

View File

@ -0,0 +1,70 @@
import argparse
import subprocess
import os
import sys
def convert_audio_to_nus3bank(input_audio, audio_type, game, preview_point, song_id):
    """Convert an input audio file to a .nus3bank via the bundled run.py helper.

    Args:
        input_audio: Path to the source audio file.
        audio_type: Intermediate codec ("bnsf", "at9", "idsp", "lopus" or "wav").
        game: Target game identifier forwarded to run.py (e.g. "ns1", "ps4").
        preview_point: Preview position in milliseconds.
        song_id: Song ID used to name the output file.
    """
    # Determine the output filename for the nus3bank
    output_filename = f"song_{song_id}.nus3bank"
    # Intermediate file produced by the first conversion step.
    converted_audio_file = f"{input_audio}.{audio_type}"
    # run.py lives in the 'script' folder next to this file.
    templates_folder = os.path.join(os.path.dirname(__file__), 'script')
    run_py_path = os.path.join(templates_folder, 'run.py')
    if audio_type in ("bnsf", "at9", "idsp", "lopus", "wav"):
        # Use sys.executable so the helper runs under the same interpreter,
        # regardless of what "python" resolves to on PATH.
        conversion_command = [sys.executable, run_py_path, audio_type, input_audio, converted_audio_file]
        nus3_command = [sys.executable, run_py_path, "nus3", game, converted_audio_file, str(preview_point), output_filename]
        try:
            # Step 1: transcode to the intermediate format.
            subprocess.run(conversion_command, check=True)
            # Step 2: wrap the intermediate file into a nus3bank container.
            subprocess.run(nus3_command, check=True)
            print(f"Conversion successful! Created {output_filename}")
            # Clean up the intermediate file once the nus3bank exists.
            if os.path.exists(converted_audio_file):
                os.remove(converted_audio_file)
                print(f"Deleted {converted_audio_file}")
        except subprocess.CalledProcessError as e:
            print(f"Error: {e}")
    else:
        print(f"Unsupported audio type: {audio_type}")
def main():
    """CLI entry point: parse arguments and run the nus3bank conversion."""
    parser = argparse.ArgumentParser(description="Convert audio to nus3bank")
    # All positionals use nargs="?" so the script can print help itself
    # instead of letting argparse error out when run with no arguments.
    parser.add_argument("input_audio", type=str, nargs="?", help="Input audio file path.")
    parser.add_argument("audio_type", type=str, nargs="?", help="Type of input audio (e.g., wav, bnsf, at9, idsp, lopus).")
    parser.add_argument("game", type=str, nargs="?", help="Game type (e.g., nijiiro, ns1, ps4, wiiu3).")
    parser.add_argument("preview_point", type=int, nargs="?", help="Audio preview point in ms.")
    parser.add_argument("song_id", type=str, nargs="?", help="Song ID for the nus3bank file.")
    args = parser.parse_args()
    # No arguments at all: show usage and exit cleanly.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    # Because of nargs="?" argparse will not reject missing positionals, so
    # validate every required argument here (previously only input_audio was
    # checked and the rest crashed later inside the conversion).
    missing = [name for name, value in (
        ("input_audio", args.input_audio),
        ("audio_type", args.audio_type),
        ("game", args.game),
        ("preview_point", args.preview_point),
        ("song_id", args.song_id),
    ) if value is None]
    if missing:
        print(f"Error: missing required argument(s): {', '.join(missing)}.")
        parser.print_help()
        sys.exit(1)
    # Call function to convert audio to nus3bank
    convert_audio_to_nus3bank(args.input_audio, args.audio_type, args.game, args.preview_point, args.song_id)

if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,36 @@
{
"items": [
{
"uniqueId": 1,
"id": "custom",
"songFileName": "sound/song_custom",
"order": 1000,
"genreNo": 0,
"branchEasy": false,
"branchNormal": false,
"branchHard": false,
"branchMania": false,
"branchUra": false,
"starEasy": 8,
"starNormal": 8,
"starHard": 8,
"starMania": 8,
"starUra": 0,
"shinutiEasy": 1253,
"shinutiNormal": 1253,
"shinutiHard": 1253,
"shinutiMania": 1253,
"shinutiUra": 0,
"shinutiEasyDuet": 1253,
"shinutiNormalDuet": 1253,
"shinutiHardDuet": 1253,
"shinutiManiaDuet": 1253,
"shinutiUraDuet": 0,
"scoreEasy": 1000000,
"scoreNormal": 1000000,
"scoreHard": 1000000,
"scoreMania": 1000000,
"scoreUra": 0
}
]
}

View File

@ -0,0 +1,6 @@
[
{
"id": "custom",
"previewPos": 68610
}
]

View File

@ -0,0 +1,36 @@
{"items":[
{
"key": "song_custom",
"japaneseText": "That That [CUSTOM SONG]",
"japaneseFontType": 0,
"englishUsText": "That That [CUSTOM SONG]",
"englishUsFontType": 1,
"chineseTText": "That That [CUSTOM SONG]",
"chineseTFontType": 0,
"koreanText": "That That [CUSTOM SONG]",
"koreanFontType": 0
},
{
"key": "song_sub_custom",
"japaneseText": "PSY, prod. & feat. SUGA of BTS",
"japaneseFontType": 0,
"englishUsText": "PSY, prod. & feat. SUGA of BTS",
"englishUsFontType": 1,
"chineseTText": "PSY, prod. & feat. SUGA of BTS",
"chineseTFontType": 0,
"koreanText": "PSY, prod. & feat. SUGA of BTS",
"koreanFontType": 0
},
{
"key": "song_detail_custom",
"japaneseText": "",
"japaneseFontType": 0,
"englishUsText": "",
"englishUsFontType": 0,
"chineseTText": "",
"chineseTFontType": 1,
"koreanText": "",
"koreanFontType": 2
}
]}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,960 @@
import tkinter as tk
from tkinter import ttk, messagebox
import json
import os
import subprocess
import shutil
import gzip
import concurrent.futures
import functools
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
# ---------------------------------------------------------------------------
# Module-level setup: load the datatable JSON files and build the main window.
# Everything below runs at import time.
# ---------------------------------------------------------------------------
data_dir = "data/"
# Datatable inputs this tool expects under data/datatable/.
musicinfo_path = os.path.join(data_dir, "datatable", "musicinfo.json")
wordlist_path = os.path.join(data_dir, "datatable", "wordlist.json")
previewpos_path = os.path.join(data_dir, "datatable", "previewpos.json")
# Treeview item id -> checkbox state string (see toggle_checkbox).
item_selection_state = {}
with open(musicinfo_path, "r", encoding="utf-8") as musicinfo_file:
    music_info = json.load(musicinfo_file)
with open(wordlist_path, "r", encoding="utf-8") as wordlist_file:
    word_list = json.load(wordlist_file)
# genreNo -> (display name, row background colour).
genre_map = {
    0: ("POP", "light blue"),
    1: ("Anime", "orange"),
    2: ("Vocaloid", "turquoise"),
    3: ("Variety", "green"),
    4: ("Unused", "gray"),
    5: ("Classic", "dark red"),
    6: ("Game Music", "purple"),
    7: ("Namco Original", "dark orange"),
}
# Both maps are key -> englishUsText; titles vs subtitles are distinguished
# by the key prefix used at lookup time ("song_" vs "song_sub_").
song_titles = {item["key"]: item["englishUsText"] for item in word_list["items"]}
song_subtitles = {item["key"]: item["englishUsText"] for item in word_list["items"]}
window = tk.Tk()
window.title("Taiko no Tatsujin Song Conversion GUI Tool")
# Set the initial size of the window
window.geometry("1000x600")  # Width x Height
# Create Treeview and Scrollbar
tree = ttk.Treeview(window, columns=("Select", "Unique ID", "ID", "Song Name", "Song Subtitle", "Genre", "Difficulty"), show="headings")
tree.heading("Unique ID", text="")  # hidden column (width 0 below)
tree.heading("ID", text="ID")
tree.heading("Song Name", text="Song Name")
tree.heading("Song Subtitle", text="Song Subtitle")
tree.heading("Genre", text="Genre")
tree.heading("Difficulty", text="Difficulty")
tree.heading("Select", text="Select")
tree.column("Select", width=50, anchor=tk.CENTER)
tree.column("Unique ID", width=0, anchor=tk.W)
tree.column("ID", width=60, anchor=tk.W)
tree.column("Song Name", anchor=tk.W)
tree.column("Song Subtitle", anchor=tk.W)
tree.column("Genre", width=100, anchor=tk.W)
tree.column("Difficulty", width=120, anchor=tk.W)
vsb = ttk.Scrollbar(window, orient="vertical", command=tree.yview)
tree.configure(yscrollcommand=vsb.set)
# Pack Treeview and Scrollbar
tree.pack(side="left", padx=10, pady=10, fill="both", expand=True)
vsb.pack(side="left", fill="y", padx=(0, 10), pady=10)
# Counter for selected items (incremented/decremented by toggle_checkbox)
selection_count = tk.IntVar()
selection_count.set(0)  # Initial selection count
def load_config():
    """Load config.json, falling back to defaults when missing or invalid.

    Returns:
        dict: configuration containing at least "max_concurrent".
    """
    config_file = "config.json"
    default_config = {
        "max_concurrent": 5,  # Default value if not specified in config file
    }
    try:
        with open(config_file, "r") as f:
            config = json.load(f)
    except FileNotFoundError:
        print(f"Config file '{config_file}' not found. Using default configuration.")
    except json.JSONDecodeError as e:
        # A malformed config file should not crash the GUI at import time.
        print(f"Config file '{config_file}' is invalid ({e}). Using default configuration.")
    else:
        # Override default values with values from the config file
        default_config.update(config)
    return default_config
# Load configuration once at import; export_data reads config["max_concurrent"].
config = load_config()
def on_search_keyrelease(event):
    """Debug hook for key releases in the search box (filtering call disabled)."""
    print("Key released:", event.keysym)
    # filter_treeview()  # intentionally left disabled, as before
# Create Search Entry
# search_var holds the filter text; populate_tree() reads it on every rebuild.
search_var = tk.StringVar()
search_entry = ttk.Entry(window, textvariable=search_var)
def toggle_checkbox(event):
    """Toggle the "Select" checkbox of the Treeview row under a left-click.

    NOTE(review): every checkbox state string below appears as an empty
    string in this copy of the file — the original checkmark glyphs were
    most likely lost to an encoding/extraction step. As written, the
    comparisons are degenerate (any state compares equal to ""), so the
    first branch always wins; confirm the intended glyphs against the
    original source before relying on this logic.
    """
    # Get the item_id based on the event coordinates
    item_id = tree.identify_row(event.y)
    # Ensure item_id is valid and corresponds to a valid item in the tree
    if item_id and tree.exists(item_id):
        current_state = item_selection_state.get(item_id, "")
        if current_state == "":  # presumably "unchecked" -> check it
            new_state = ""
        elif current_state == "":  # presumably "checked" -> uncheck it
            new_state = ""
        # Update the selection state for the item
        item_selection_state[item_id] = new_state
        # Update the values in the treeview to reflect the new state
        tree.item(item_id, values=(new_state,) + tree.item(item_id, "values")[1:])
        # Update the selection count based on the state change
        if new_state == "":
            selection_count.set(selection_count.get() + 1)  # Increment selection count
        elif new_state == "":
            selection_count.set(selection_count.get() - 1)  # Decrement selection count
def filter_treeview():
    """Re-populate the Treeview applying the current search-box text.

    populate_tree() takes no parameters and reads search_var itself, so no
    argument is passed (the previous call passed the lowered search text and
    would have raised TypeError).
    """
    populate_tree()
def populate_tree():
    """Fill the Treeview with every song whose title matches the search box.

    Reads the module-level music_info / song_titles / song_subtitles maps and
    the search_var entry text; rebuilds the tree from scratch on every call.
    """
    global selected_items  # NOTE(review): declared global but never assigned in this function
    # Store currently selected items so the highlight survives the rebuild
    current_selection = tree.selection()
    # Clear existing items in the Treeview
    tree.delete(*tree.get_children())
    for song in sorted(music_info["items"], key=lambda x: x["id"]):  # Sort by ID
        unique_id = ""  # the hidden "Unique ID" column is left blank
        song_id = f"{song['id']}"
        genre_no = song["genreNo"]
        genre_name, genre_color = genre_map.get(genre_no, ("Unknown Genre", "white"))
        english_title = song_titles.get(f"song_{song_id}", "-")
        english_subtitle = song_subtitles.get(f"song_sub_{song_id}", "-")
        star_easy = song.get("starEasy", "N/A")
        star_normal = song.get("starNormal", "N/A")
        star_hard = song.get("starHard", "N/A")
        star_mania = song.get("starMania", "N/A")
        star_ura = song.get("starUra", 0)
        difficulty_info_parts = [
            f"{star_easy}",
            f"{star_normal}",
            f"{star_hard}",
            f"{star_mania}",
        ]
        # Ura difficulty is only shown when present
        if star_ura > 0:
            difficulty_info_parts.append(f"{star_ura}")
        difficulty_info = " | ".join(difficulty_info_parts)
        # Check if the search text matches the song name
        if search_var.get().strip().lower() in english_title.lower():
            item_id = tree.insert("", "end", values=("", unique_id, song_id, english_title, english_subtitle, genre_name, difficulty_info))
            # NOTE(review): a tag is configured per genre but the inserted row is
            # never given that tag, so the background colour is not applied —
            # confirm whether tags=(genre_name,) was intended on tree.insert.
            tree.tag_configure(genre_name, background=genre_color)
    # Restore original selection after filtering
    for item in current_selection:
        if tree.exists(item):  # Check if item exists in Treeview
            tree.selection_add(item)
        else:
            print("Item not found:", item)  # Debug print
# Rebuild (and thereby filter) the tree on every keystroke in the search box.
search_entry.bind("<KeyRelease>", lambda event: populate_tree())
def sort_tree(sort_option):
    """Re-populate the Treeview ordered by ID, song name, or genre."""
    # Empty the tree before repopulating it.
    tree.delete(*tree.get_children())
    # Every sort resets the checkbox counter.
    selection_count.set(0)
    if sort_option == "ID":
        populate_tree()
    elif sort_option == "Song Name":
        # Alphabetical by English title (missing titles sort as "-").
        by_title = sorted(music_info["items"],
                          key=lambda entry: song_titles.get(f"song_{entry['id']}", "-"))
        for entry in by_title:
            populate_song_entry(entry)
    elif sort_option == "Genre":
        # Genre buckets in numeric order, each bucket ordered by ID.
        by_id = sorted(music_info["items"], key=lambda entry: entry["id"])
        for genre in sorted(genre_map):
            for entry in by_id:
                if entry["genreNo"] == genre:
                    populate_song_entry(entry)
def populate_song_entry(song):
    """Insert a single song row into the Treeview (no search filtering)."""
    sid = f"{song['id']}"
    genre_label, genre_colour = genre_map.get(song["genreNo"], ("Unknown Genre", "white"))
    title = song_titles.get(f"song_{sid}", "-")
    subtitle = song_subtitles.get(f"song_sub_{sid}", "-")
    # Easy/Normal/Hard/Mania stars, with Ura appended only when present.
    parts = [f"{song.get(key, 'N/A')}"
             for key in ("starEasy", "starNormal", "starHard", "starMania")]
    ura = song.get("starUra", 0)
    if ura > 0:
        parts.append(f"{ura}")
    difficulty = " | ".join(parts)
    # First two columns (checkbox, hidden unique id) start blank.
    tree.insert("", "end", values=("", "", sid, title, subtitle, genre_label, difficulty))
    tree.tag_configure(genre_label, background=genre_colour)
# Populate the Treeview initially (search box is empty, so all songs show).
populate_tree()
def update_selection_count(event=None):
    """Refresh the "checked/max" label, warning when over the platform cap."""
    chosen = tree.selection()
    checked = selection_count.get()  # number of checkbox-checked rows
    platform = game_platform_var.get()
    # Per-platform ceiling on how many songs may be exported.
    limits = {"PS4": 400, "NS1": 600, "PTB": 200}
    max_entries = limits.get(platform, 0)
    if len(chosen) > max_entries:
        messagebox.showerror("Selection Limit Exceeded",
                             f"Maximum {max_entries} entries can be selected for {platform}.")
    else:
        # Update the selection count label text
        selection_count_label.config(text=f"{checked}/{max_entries}")
# Bind the treeview selection event to update_selection_count function
tree.bind("<<TreeviewSelect>>", update_selection_count)
# Left-click toggles the row's "Select" checkbox.
# Older binding experiments kept for reference:
#tree.bind("<Button-1>", lambda event: toggle_selection(tree.identify_row(event.y)))
tree.bind("<Button-1>", toggle_checkbox)
#tree.bind("<Button-1>", on_treeview_click)
def preview_audio(song_id):
    """Play the song's MP3 from its preview position using ffplay."""
    start_ms = get_preview_pos(song_id)
    if start_ms is None:
        return
    track = f"data/sound/song_{song_id}.mp3"
    # ffplay's -ss seek offset is in seconds.
    subprocess.run(["ffplay", "-autoexit", "-ss", f"{start_ms / 1000}", track])
def get_preview_pos(song_id):
    """Return the previewPos for song_id from previewpos.json, or None."""
    with open(previewpos_path, "r", encoding="utf-8") as fh:
        entries = json.load(fh)
    # First matching entry wins; None when the song has no preview position.
    return next((entry["previewPos"] for entry in entries if entry["id"] == song_id), None)
def preview_selected():
    """Preview the first currently-highlighted song, if any."""
    chosen = tree.selection()
    if not chosen:
        return
    # Column index 2 of the row values holds the song ID.
    preview_audio(tree.item(chosen[0])["values"][2])
def merge_ptb():
    """Run the PTB wordlist merge helper script."""
    subprocess.run(["python", "script/ptb_wordlist.py"])
def merge_ps4_int():
    """Run the PS4 (EU/USA) wordlist merge helper script."""
    subprocess.run(["python", "script/ps4_wordlist.py"])
def merge_ps4_jp():
    """Run the PS4 (JPN/ASIA) wordlist merge helper script."""
    subprocess.run(["python", "script/ps4_wordlist_jp.py"])
def merge_ns1_int():
    """Run the NS1 (international) wordlist merge helper script."""
    subprocess.run(["python", "script/ns1_wordlist.py"])
def merge_ns1_jp():
    """Run the NS1 (Japanese) wordlist merge helper script."""
    subprocess.run(["python", "script/ns1_wordlist_jp.py"])
def encrypt_file_ptb(input_file, output_file):
    """AES-128-CBC encrypt input_file into output_file using the PTB key.

    A random IV is written as the first 16 bytes of the output so the
    decryption side can recover it.
    """
    iv = os.urandom(16)  # AES block size is 16 bytes
    # Static PTB datatable key.
    key = bytes.fromhex("54704643596B474170554B6D487A597A")
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
    encryptor = cipher.encryptor()
    with open(input_file, 'rb') as f_in, open(output_file, 'wb') as f_out:
        # IV first (needed for decryption).
        f_out.write(iv)
        # Encrypt one 16-byte block at a time; PKCS7-pad the final short block.
        while block := f_in.read(16):
            if len(block) % 16 != 0:
                padder = padding.PKCS7(128).padder()
                block = padder.update(block) + padder.finalize()
            f_out.write(encryptor.update(block))
        # Flush any remaining buffered ciphertext.
        f_out.write(encryptor.finalize())
def encrypt_file_ns1(input_file, output_file):
    """AES-128-CBC encrypt input_file into output_file using the NS1 key.

    A random IV is written as the first 16 bytes of the output so the
    decryption side can recover it.
    """
    iv = os.urandom(16)  # AES block size is 16 bytes
    # Static NS1 datatable key.
    key = bytes.fromhex("566342346438526962324A366334394B")
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
    encryptor = cipher.encryptor()
    with open(input_file, 'rb') as f_in, open(output_file, 'wb') as f_out:
        # IV first (needed for decryption).
        f_out.write(iv)
        # Encrypt one 16-byte block at a time; PKCS7-pad the final short block.
        while block := f_in.read(16):
            if len(block) % 16 != 0:
                padder = padding.PKCS7(128).padder()
                block = padder.update(block) + padder.finalize()
            f_out.write(encryptor.update(block))
        # Flush any remaining buffered ciphertext.
        f_out.write(encryptor.finalize())
def gzip_compress_file(input_file_path):
    """Gzip-compress a file and return the path of the new .gz file.

    The output keeps the input's base name with its extension replaced by
    .gz (e.g. "musicinfo.json" -> "musicinfo.gz").
    """
    # Extract the base filename without extension
    file_name, _ = os.path.splitext(input_file_path)
    output_file_path = f'{file_name}.gz'
    with open(input_file_path, 'rb') as f_in:
        with gzip.open(output_file_path, 'wb') as f_out:
            # copyfileobj streams in fixed-size chunks instead of writelines,
            # which needlessly splits binary data on newline bytes.
            shutil.copyfileobj(f_in, f_out)
    return output_file_path
def gzip_compress_file_ps4(input_file_path):
    """Gzip-compress a file for PS4 and return the new file's path.

    Identical to gzip_compress_file except the output extension is .bin —
    the PS4 datatables expect gzip data under that name.
    """
    # Extract the base filename without extension
    file_name, _ = os.path.splitext(input_file_path)
    output_file_path = f'{file_name}.bin'
    with open(input_file_path, 'rb') as f_in:
        with gzip.open(output_file_path, 'wb') as f_out:
            # copyfileobj streams in fixed-size chunks instead of writelines,
            # which needlessly splits binary data on newline bytes.
            shutil.copyfileobj(f_in, f_out)
    return output_file_path
def copy_folder(source_folder, destination_folder):
    """
    Copy the entire contents of source_folder to destination_folder.

    Args:
        source_folder (str): Path to the source folder to copy.
        destination_folder (str): Path to the destination folder.

    Returns:
        bool: True if copy operation is successful, False otherwise.
    """
    # Check if destination folder already exists
    if os.path.exists(destination_folder):
        print(f"Destination folder '{destination_folder}' already exists. Skipping copy.")
        return False
    try:
        # Copy the entire folder from source to destination
        shutil.copytree(source_folder, destination_folder)
    except OSError as e:
        # shutil.Error subclasses OSError, so one handler covers both of the
        # previously duplicated except blocks.
        print(f"Error: {e}")
        return False
    print(f"Folder '{source_folder}' successfully copied to '{destination_folder}'.")
    return True
def export_data():
selected_items = []
for item_id in tree.get_children():
if tree.set(item_id, "Select") == "":
selected_items.append(item_id)
game_platform = game_platform_var.get()
game_region = game_region_var.get()
max_concurrent = config["max_concurrent"]
processed_ids = set() # Track processed song IDs
if game_platform == "PS4":
output_dir = "out/Data/ORBIS/datatable"
fumen_output_dir = "out/Data/ORBIS/fumen"
fumen_hitwide_output_dir = "out/Data/ORBIS/fumen_hitwide"
audio_output_dir = "out/Data/ORBIS/sound"
musicinfo_filename = "musicinfo.json"
max_entries = 400 # Maximum allowed entries for PS4
platform_tag = "ps4"
elif game_platform == "NS1":
output_dir = "out/Data/NX/datatable"
fumen_output_dir = "out/Data/NX/fumen/enso"
fumen_hitwide_output_dir = "out/Data/NX/fumen_hitwide/enso"
fumen_hitnarrow_output_dir = "out/Data/NX/fumen_hitnarrow/enso"
audio_output_dir = "out/Data/NX/sound"
musicinfo_filename = "musicinfo.json"
max_entries = 600 # Maximum allowed entries for NS1
platform_tag = "ns1"
elif game_platform == "PTB":
output_dir = "out/Data/Raw/ReadAssets"
fumen_output_dir = "out/Data/Raw/fumen"
audio_output_dir = "out/Data/Raw/sound/sound"
musicinfo_filename = "musicinfo.json"
songinfo_filename = "songinfo.json"
max_entries = 200 # Maximum allowed entries for PTB
platform_tag = "PTB"
os.makedirs(output_dir, exist_ok=True)
os.makedirs(fumen_output_dir, exist_ok=True)
os.makedirs(audio_output_dir, exist_ok=True)
selected_music_info = []
selected_song_info = []
selected_wordlist = []
current_unique_id = 0
try:
if len(selected_items) > max_entries:
messagebox.showerror("Selection Limit Exceeded", f"Maximum {max_entries} entries can be selected for {game_platform}.")
return
# Load preview position data
with open(previewpos_path, "r", encoding="utf-8") as previewpos_file:
previewpos_data = json.load(previewpos_file)
# Copy fumen folders for selected songs to output directory
for item_id in selected_items:
song_id = tree.item(item_id)["values"][2]
fumen_folder_path = os.path.join(data_dir, "fumen", str(song_id))
if os.path.exists(fumen_folder_path):
shutil.copytree(fumen_folder_path, os.path.join(fumen_output_dir, f"{song_id}"))
song_info = next((item for item in music_info["items"] if item["id"] == song_id), None)
for item_id in selected_items:
song_id = tree.item(item_id)["values"][2]
song_info = next((item for item in music_info["items"] if item["id"] == song_id), None)
if song_info:
# Calculate song_order based on genreNo and current_unique_id
song_order = (int(song_info["genreNo"]) * 1000) + current_unique_id
if game_platform == "NS1":
ns1_song_info = {
"id": song_info["id"],
"uniqueId": current_unique_id,
"songFileName": song_info["songFileName"],
"order": song_order,
"genreNo": song_info["genreNo"],
"secretFlag":False,
"dlc":False,
"debug":False,
"recording":True,
"branchEasy": song_info["branchEasy"],
"branchNormal": song_info["branchNormal"],
"branchHard": song_info["branchHard"],
"branchMania": song_info["branchMania"],
"branchUra": song_info["branchUra"],
"starEasy": song_info["starEasy"],
"starNormal": song_info["starNormal"],
"starHard": song_info["starHard"],
"starMania": song_info["starMania"],
"starUra": song_info["starUra"],
"shinutiEasy": song_info["shinutiEasy"],
"shinutiNormal": song_info["shinutiNormal"],
"shinutiHard": song_info["shinutiHard"],
"shinutiMania": song_info["shinutiMania"],
"shinutiUra": song_info["shinutiUra"],
"shinutiEasyDuet": song_info["shinutiEasyDuet"],
"shinutiNormalDuet": song_info["shinutiNormalDuet"],
"shinutiHardDuet": song_info["shinutiHardDuet"],
"shinutiManiaDuet": song_info["shinutiManiaDuet"],
"shinutiUraDuet": song_info["shinutiUraDuet"],
"scoreEasy": song_info["scoreEasy"],
"scoreNormal": song_info["scoreNormal"],
"scoreHard": song_info["scoreHard"],
"scoreMania": song_info["scoreMania"],
"scoreUra": song_info["scoreUra"],
"alleviationEasy": False,
"alleviationNormal": False,
"alleviationHard": False,
"alleviationMania": False,
"alleviationUra": False,
"song_info1": 25721,
"song_info2": 39634,
"song_info3": 60504,
"song_info4": 79618,
"song_info5": 98750,
"song_info6": -1,
"song_info7": -1,
"song_info8": -1,
"song_info9": -1,
"song_info10": -1,
"aocID": song_info["id"],
"limitedID": -1,
"extraID": -1,
"tournamentRand": True,
"bgDon0": "",
"bgDancer0": "",
"bgFever0": "",
"chibi0": "",
"rendaEffect0": "",
"dancer0": "",
"feverEffect0": "",
"bgDon1": "",
"bgDancer1": "",
"bgFever1": "",
"chibi1": "",
"rendaEffect1": "",
"dancer1": "",
"feverEffect1": "",
}
selected_music_info.append(ns1_song_info)
elif game_platform == "PS4":
ps4_song_info = {
"id": song_info["id"],
"uniqueId": current_unique_id,
"songFileName": song_info["songFileName"],
"order": song_order,
"genreNo": song_info["genreNo"],
"secretFlag":False,
"dlc":False,
"entitlementKey":"",
"secondKey":False,
"entitlementKey2":"",
"debug":False,
"branchEasy": song_info["branchEasy"],
"branchNormal": song_info["branchNormal"],
"branchHard": song_info["branchHard"],
"branchMania": song_info["branchMania"],
"branchUra": song_info["branchUra"],
"starEasy": song_info["starEasy"],
"starNormal": song_info["starNormal"],
"starHard": song_info["starHard"],
"starMania": song_info["starMania"],
"starUra": song_info["starUra"],
"shinutiEasy": song_info["shinutiEasy"],
"shinutiNormal": song_info["shinutiNormal"],
"shinutiHard": song_info["shinutiHard"],
"shinutiMania": song_info["shinutiMania"],
"shinutiUra": song_info["shinutiUra"],
"shinutiEasyDuet": song_info["shinutiEasyDuet"],
"shinutiNormalDuet": song_info["shinutiNormalDuet"],
"shinutiHardDuet": song_info["shinutiHardDuet"],
"shinutiManiaDuet": song_info["shinutiManiaDuet"],
"shinutiUraDuet": song_info["shinutiUraDuet"],
"scoreEasy": song_info["scoreEasy"],
"scoreNormal": song_info["scoreNormal"],
"scoreHard": song_info["scoreHard"],
"scoreMania": song_info["scoreMania"],
"scoreUra": song_info["scoreUra"],
"secret":False,
"songFileNameForSelect": song_info["songFileName"],
"bgSolo0":"",
"bgDuet0":"",
"chibi0":"",
"rendaEffect0":"",
"dancer0":"",
"feverEffect0":"",
"bgSolo1":"",
"bgDuet1":"",
"chibi1":"",
"rendaEffect1":"",
"dancer1":"",
"feverEffect1":""
}
selected_music_info.append(ps4_song_info)
elif game_platform == "PTB":
ptb_song_info = {
"uniqueId": current_unique_id,
"id": song_info["id"],
"songFileName": song_info["songFileName"],
"order": song_order,
"genreNo": song_info["genreNo"],
"isLock":False,
"isNew":False,
"debug":False,
"temp":False,
"temp2":False,
"branchEasy": song_info["branchEasy"],
"branchNormal": song_info["branchNormal"],
"branchHard": song_info["branchHard"],
"branchMania": song_info["branchMania"],
"branchUra": song_info["branchUra"],
"starEasy": song_info["starEasy"],
"starNormal": song_info["starNormal"],
"starHard": song_info["starHard"],
"starMania": song_info["starMania"],
"starUra": song_info["starUra"],
"shinutiEasy": song_info["shinutiEasy"],
"shinutiNormal": song_info["shinutiNormal"],
"shinutiHard": song_info["shinutiHard"],
"shinutiMania": song_info["shinutiMania"],
"shinutiUra": song_info["shinutiUra"],
"shinutiEasyDuet": song_info["shinutiEasyDuet"],
"shinutiNormalDuet": song_info["shinutiNormalDuet"],
"shinutiHardDuet": song_info["shinutiHardDuet"],
"shinutiManiaDuet": song_info["shinutiManiaDuet"],
"shinutiUraDuet": song_info["shinutiUraDuet"],
"scoreEasy": song_info["scoreEasy"],
"scoreNormal": song_info["scoreNormal"],
"scoreHard": song_info["scoreHard"],
"scoreMania": song_info["scoreMania"],
"scoreUra": song_info["scoreUra"],
}
selected_music_info.append(ptb_song_info)
# Find previewPos from previewpos.json based on song_id
preview_pos = None
for item in previewpos_data:
if item["id"] == song_info["id"]:
preview_pos = item["previewPos"]
break
ptb_extra_song_info = {
"uniqueId": current_unique_id,
"id": song_info["id"],
"previewPos": preview_pos if preview_pos is not None else 0, # Use 0 if previewPos not found
"fumenOffsetPos":0
}
selected_song_info.append(ptb_extra_song_info)
current_unique_id += 1
# Find the wordlist items corresponding to song variations
word_keys = [f"song_{song_id}", f"song_sub_{song_id}", f"song_detail_{song_id}"]
for key in word_keys:
word_info = next((item for item in word_list["items"] if item["key"] == key), None)
if word_info:
selected_wordlist.append(word_info)
if game_platform == "PS4":
# Find the corresponding preview position for the current song_id
preview_pos = next((item["previewPos"] for item in previewpos_data if item["id"] == song_id), None)
if preview_pos is not None:
# Run the audio conversion command based on the game platform
def convert_song(song_id):
preview_pos = get_preview_pos(song_id)
song_filename = f"data/sound/song_{song_id}.mp3"
output_file = os.path.join(audio_output_dir, f"song_{song_id}.nus3bank")
command = [
"python",
"conv.py",
song_filename,
"at9",
platform_tag,
str(preview_pos), # Convert preview_pos to string
song_id
]
subprocess.run(command)
shutil.move(f"song_{song_id}.nus3bank", output_file)
elif game_platform == "NS1":
# Find the corresponding preview position for the current song_id
preview_pos = next((item["previewPos"] for item in previewpos_data if item["id"] == song_id), None)
if preview_pos is not None:
# Run the audio conversion command based on the game platform
def convert_song(song_id):
preview_pos = get_preview_pos(song_id)
song_filename = f"data/sound/song_{song_id}.mp3"
output_file = os.path.join(audio_output_dir, f"song_{song_id}.nus3bank")
command = [
"python",
"conv.py",
song_filename,
"idsp",
platform_tag,
str(preview_pos), # Convert preview_pos to string
song_id
]
subprocess.run(command)
shutil.move(f"song_{song_id}.nus3bank", output_file)
elif game_platform == "PTB":
# Find the corresponding preview position for the current song_id
preview_pos = next((item["previewPos"] for item in previewpos_data if item["id"] == song_id), None)
if preview_pos is not None:
# Run the audio conversion command based on the game platform
def convert_song(song_id):
preview_pos = get_preview_pos(song_id)
song_filename = f"data/sound/song_{song_id}.mp3"
output_file = os.path.join(audio_output_dir, f"song_{song_id}.bin")
command = [
"python",
"script/acb/acb.py",
song_filename,
song_id
]
subprocess.run(command)
shutil.move(f"song_{song_id}.bin", output_file)
try:
if len(selected_items) > 0:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrent) as executor:
futures = []
for item_id in selected_items:
song_id = tree.item(item_id)["values"][2]
if song_id not in processed_ids:
# Submit conversion task for this song ID
futures.append(executor.submit(convert_song, song_id))
processed_ids.add(song_id) # Mark as processed
# Wait for all tasks to complete
concurrent.futures.wait(futures)
else:
messagebox.showinfo("No Songs Selected", "Please select songs to export.")
except Exception as e:
messagebox.showerror("Export Error", f"An error occurred during export: {str(e)}")
# Export selected musicinfo and wordlist
if game_platform == "PTB":
selected_musicinfo_path = os.path.join(output_dir, musicinfo_filename)
selected_wordlist_path = os.path.join(output_dir, "wordlist.json")
selected_songinfo_path = os.path.join(output_dir, songinfo_filename)
with open(selected_songinfo_path, "w", encoding="utf-8") as out_musicinfo_file:
json.dump({"items": selected_song_info}, out_musicinfo_file, ensure_ascii=False, indent=4)
with open(selected_musicinfo_path, "w", encoding="utf-8") as out_musicinfo_file:
json.dump({"items": selected_music_info}, out_musicinfo_file, ensure_ascii=False, indent=4)
with open(selected_wordlist_path, "w", encoding="utf-8") as out_wordlist_file:
json.dump({"items": selected_wordlist}, out_wordlist_file, ensure_ascii=False, indent=4)
merge_ptb()
#Compress each ReadAsset file
gzip_compress_file(selected_musicinfo_path)
gzip_compress_file(selected_wordlist_path)
gzip_compress_file(selected_songinfo_path)
#Compress each Remove the json files
os.remove(selected_musicinfo_path)
os.remove(selected_wordlist_path)
os.remove(selected_songinfo_path)
#Compressed File definitions
compressed_musicinfo_path = os.path.join(output_dir, "musicinfo.gz")
compressed_wordlist_path = os.path.join(output_dir, "wordlist.gz")
compressed_songinfo_path = os.path.join(output_dir, "songinfo.gz")
# Final Output definitions
final_musicinfo = os.path.join(output_dir, "musicinfo.bin")
final_wordlist = os.path.join(output_dir, "wordlist.bin")
final_songinfo = os.path.join(output_dir, "songinfo.bin")
# Encrypt the final files
encrypt_file_ptb(compressed_musicinfo_path, final_musicinfo)
encrypt_file_ptb(compressed_wordlist_path, final_wordlist)
encrypt_file_ptb(compressed_songinfo_path, final_songinfo)
# Remove compressed .gz files
os.remove(compressed_musicinfo_path)
os.remove(compressed_wordlist_path)
os.remove(compressed_songinfo_path)
elif game_platform == "PS4":
selected_musicinfo_path = os.path.join(output_dir, musicinfo_filename)
selected_wordlist_path = os.path.join(output_dir, "wordlist.json")
with open(selected_musicinfo_path, "w", encoding="utf-8") as out_musicinfo_file:
json.dump({"items": selected_music_info}, out_musicinfo_file, ensure_ascii=False, indent=4)
with open(selected_wordlist_path, "w", encoding="utf-8") as out_wordlist_file:
json.dump({"items": selected_wordlist}, out_wordlist_file, ensure_ascii=False, indent=4)
if game_region == "JPN/ASIA":
merge_ps4_jp()
elif game_region == "EU/USA":
merge_ps4_int()
#Compress each datatable file
gzip_compress_file_ps4(selected_musicinfo_path)
gzip_compress_file_ps4(selected_wordlist_path)
#Remove .json files
os.remove(selected_musicinfo_path)
os.remove(selected_wordlist_path)
copy_folder(fumen_output_dir,fumen_hitwide_output_dir)
elif game_platform == "NS1":
selected_musicinfo_path = os.path.join(output_dir, musicinfo_filename)
selected_wordlist_path = os.path.join(output_dir, "wordlist.json")
with open(selected_musicinfo_path, "w", encoding="utf-8") as out_musicinfo_file:
json.dump({"items": selected_music_info}, out_musicinfo_file, ensure_ascii=False, indent=4)
with open(selected_wordlist_path, "w", encoding="utf-8") as out_wordlist_file:
json.dump({"items": selected_wordlist}, out_wordlist_file, ensure_ascii=False, indent=4)
if game_region == "JPN/ASIA":
merge_ns1_jp()
elif game_region == "EU/USA":
merge_ns1_int()
#Compress each datatable file
gzip_compress_file(selected_musicinfo_path)
gzip_compress_file(selected_wordlist_path)
#Compress each Remove the json files
os.remove(selected_musicinfo_path)
os.remove(selected_wordlist_path)
#Compressed File definitions
compressed_musicinfo_path = os.path.join(output_dir, "musicinfo.gz")
compressed_wordlist_path = os.path.join(output_dir, "wordlist.gz")
# Final Output definitions
final_musicinfo = os.path.join(output_dir, "musicinfo.bin")
final_wordlist = os.path.join(output_dir, "wordlist.bin")
# Encrypt the final files
encrypt_file_ns1(compressed_musicinfo_path, final_musicinfo)
encrypt_file_ns1(compressed_wordlist_path, final_wordlist)
# Remove compressed .gz files
os.remove(compressed_musicinfo_path)
os.remove(compressed_wordlist_path)
copy_folder(fumen_output_dir,fumen_hitwide_output_dir)
copy_folder(fumen_output_dir,fumen_hitnarrow_output_dir)
messagebox.showinfo("Export Completed", "Selected songs exported successfully!")
except Exception as e:
messagebox.showerror("Export Error", f"An error occurred during export: {str(e)}")
# Button shenanigans: tkinter packs widgets in creation order, so the literal
# order of these statements controls the on-screen layout.
# Top Side
preview_button = ttk.Button(window, text="Preview", command=preview_selected)
preview_button.pack(side="top", padx=20, pady=10)
# Create sorting options
sort_options = ["ID", "Song Name", "Genre"]
sort_label = tk.Label(window, text="Sort by:")
sort_label.pack(side="top", padx=20, pady=5)
sort_var = tk.StringVar(window)
sort_var.set("ID")
# Re-sorts the tree whenever a new sort key is picked.
sort_menu = tk.OptionMenu(window, sort_var, *sort_options, command=lambda _: sort_tree(sort_var.get()))
sort_menu.pack(side="top", padx=20, pady=0)
# search_entry.pack(side="top", padx=20, pady=10, fill="x") # search bar, currently broken
# Bottom Side
export_button = ttk.Button(window, text="Export", command=export_data)
export_button.pack(side="bottom", padx=20, pady=10)
# Create Selection Count Label
selection_count_label = ttk.Label(window, text="0/???")
selection_count_label.pack(side="bottom", padx=20, pady=10)
# Target platform selection (PS4 is the default)
game_platform_var = tk.StringVar(window)
game_platform_var.set("PS4")
game_platform_choices = ["PS4", "NS1", "PTB"]
game_platform_menu = tk.OptionMenu(window, game_platform_var, *game_platform_choices)
game_platform_menu.pack(side="bottom", padx=20, pady=0)
# Create Label for Platform selection
platform_label = tk.Label(window, text="Platform")
platform_label.pack(side="bottom", padx=20, pady=5)
# Game region selection, needed for wordlist export.
game_region_var = tk.StringVar(window)
game_region_var.set("JPN/ASIA")
game_region_choices = ["JPN/ASIA", "EU/USA"]
game_region_menu = tk.OptionMenu(window, game_region_var, *game_region_choices)
game_region_menu.pack(side="bottom", padx=20, pady=10)
game_region_label = tk.Label(window, text="Game Region:")
game_region_label.pack(side="bottom", padx=20, pady=0)
# Doesn't function?
# Update selection count when tree selection changes
#tree.bind("<<TreeviewSelect>>", lambda event: update_selection_count())
# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()

View File

@ -0,0 +1,71 @@
import os
import sys
import subprocess
import concurrent.futures
from pydub import AudioSegment
# Function to process each .nus3bank file
def process_nus3bank(file):
    """Decode one .nus3bank file to WAV via vgmstream, then hand it to the
    trim/FLAC step.

    The decoded WAV is written to the shared "out" folder (created by main())
    and process_wav_with_trim() converts it to .flac and deletes the WAV.
    """
    if file.endswith('.nus3bank'):
        base_name = os.path.splitext(os.path.basename(file))[0]
        out_folder = "out"
        wav_file = os.path.join(out_folder, f"{base_name}.wav")
        # Pass the arguments as a list with shell=False so paths containing
        # spaces or shell metacharacters cannot break (or inject) the command.
        command = ["vgmstream-cli.exe", "-o", wav_file, file]
        subprocess.run(command, check=True)
        # Trim the first 20ms and convert to flac
        process_wav_with_trim(wav_file)
# Function to process each .wav file by trimming and converting to .flac
def process_wav_with_trim(wav_file):
    """Drop the first 20 ms of *wav_file*, save it as a 48 kHz FLAC in
    the "out" folder, then delete the source WAV."""
    if not wav_file.endswith('.wav'):
        return
    source_audio = AudioSegment.from_wav(wav_file)
    # pydub slices are in milliseconds: skip the first 20 ms.
    trimmed = source_audio[20:]
    stem = os.path.splitext(os.path.basename(wav_file))[0]
    flac_path = os.path.join("out", f"{stem}.flac")
    # Compressed FLAC at a fixed 48000 Hz sample rate.
    trimmed.export(flac_path, format="flac",
                   parameters=["-ar", "48000", "-compression_level", "8"])
    # Clean up the intermediate .wav file.
    os.remove(wav_file)
# Main function
def main():
    """Convert every .nus3bank in the folder given on the command line to FLAC.

    Files are decoded in parallel (5 worker threads); per-file failures are
    reported but do not abort the batch.
    """
    if len(sys.argv) < 2:
        print("Usage: python script.py path/to/input/folder")
        return
    input_folder = sys.argv[1]
    # Check if the input folder exists
    if not os.path.exists(input_folder):
        print(f"Error: Input folder '{input_folder}' not found.")
        return
    out_folder = "out"
    # Create output folder if it doesn't exist
    os.makedirs(out_folder, exist_ok=True)
    # List all .nus3bank files in the input folder
    nus3bank_files = [os.path.join(input_folder, file) for file in os.listdir(input_folder) if file.endswith('.nus3bank')]
    # Process files using a thread pool with 5 worker threads
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        # Submit each file processing task to the executor
        futures = [executor.submit(process_nus3bank, file) for file in nus3bank_files]
        # Wait for all tasks to complete
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()  # This will propagate exceptions if any occurred during execution
            except Exception as exc:
                print(f"An error occurred: {exc}")

if __name__ == "__main__":
    main()

View File

@ -0,0 +1,70 @@
import os
import sys
import subprocess
import concurrent.futures
from pydub import AudioSegment
# Function to process each .nus3bank file
def process_nus3bank(file):
    """Decode one .nus3bank file to WAV via vgmstream, then hand it to the
    FLAC conversion step.

    The decoded WAV is written to the shared "out" folder (created by main())
    and process_wav_with_trim() converts it to .flac and deletes the WAV.
    """
    if file.endswith('.nus3bank'):
        base_name = os.path.splitext(os.path.basename(file))[0]
        out_folder = "out"
        wav_file = os.path.join(out_folder, f"{base_name}.wav")
        # Pass the arguments as a list with shell=False so paths containing
        # spaces or shell metacharacters cannot break (or inject) the command.
        command = ["vgmstream-cli.exe", "-o", wav_file, file]
        subprocess.run(command, check=True)
        # Convert to flac
        process_wav_with_trim(wav_file)
# Function to process each .wav file by trimming and converting to .flac
def process_wav_with_trim(wav_file):
    """Convert *wav_file* to a 48 kHz FLAC in the "out" folder, then delete
    the source WAV.

    Despite the name, this variant keeps the full audio: the slice starts
    at 0 ms, mirroring the trimming version of this script without cutting
    anything.
    """
    if not wav_file.endswith('.wav'):
        return
    audio = AudioSegment.from_wav(wav_file)
    # audio[0:] keeps everything; retained for symmetry with the 20 ms-trim variant.
    full_audio = audio[0:]
    stem = os.path.splitext(os.path.basename(wav_file))[0]
    flac_path = os.path.join("out", f"{stem}.flac")
    # Compressed FLAC at a fixed 48000 Hz sample rate.
    full_audio.export(flac_path, format="flac",
                      parameters=["-ar", "48000", "-compression_level", "8"])
    # Clean up the intermediate .wav file.
    os.remove(wav_file)
# Main function
def main():
    """Convert every .nus3bank in the folder given on the command line to FLAC.

    Files are decoded in parallel (5 worker threads); per-file failures are
    reported but do not abort the batch.
    """
    if len(sys.argv) < 2:
        print("Usage: python script.py path/to/input/folder")
        return
    input_folder = sys.argv[1]
    # Check if the input folder exists
    if not os.path.exists(input_folder):
        print(f"Error: Input folder '{input_folder}' not found.")
        return
    out_folder = "out"
    # Create output folder if it doesn't exist
    os.makedirs(out_folder, exist_ok=True)
    # List all .nus3bank files in the input folder
    nus3bank_files = [os.path.join(input_folder, file) for file in os.listdir(input_folder) if file.endswith('.nus3bank')]
    # Process files using a thread pool with 5 worker threads
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        # Submit each file processing task to the executor
        futures = [executor.submit(process_nus3bank, file) for file in nus3bank_files]
        # Wait for all tasks to complete
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()  # This will propagate exceptions if any occurred during execution
            except Exception as exc:
                print(f"An error occurred: {exc}")

if __name__ == "__main__":
    main()

View File

@ -0,0 +1,59 @@
import sys
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.backends import default_backend
KEY = bytes.fromhex("54704643596B474170554B6D487A597A")
IV = bytes.fromhex("FF" * 16) # IV for encryption
def encrypt_file(input_filename, output_filename):
    """AES-128-CBC encrypt *input_filename* into *output_filename*.

    The plaintext is PKCS7-padded, encrypted with the fixed module KEY and
    the all-0xFF IV, and the IV is written before the ciphertext so the
    output file is self-contained for decrypt_file().
    """
    with open(input_filename, 'rb') as infile:
        plaintext = infile.read()
    padder = padding.PKCS7(algorithms.AES.block_size).padder()
    padded_data = padder.update(plaintext) + padder.finalize()
    cipher = Cipher(algorithms.AES(KEY), modes.CBC(IV), backend=default_backend())
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(padded_data) + encryptor.finalize()
    # IV first, then ciphertext, matching what decrypt_file() expects.
    with open(output_filename, 'wb') as outfile:
        outfile.write(IV + ciphertext)
def decrypt_file(input_filename, output_filename):
    """AES-CBC decrypt an IV-prefixed *input_filename* into *output_filename*.

    NOTE(review): no PKCS7 unpadding is performed, so the output keeps its
    trailing padding bytes. Presumably downstream consumers tolerate the
    extra bytes — confirm before changing this.
    """
    with open(input_filename, 'rb') as infile:
        encrypted_data = infile.read()
    iv = encrypted_data[:16]  # Extract IV from the beginning of the file
    cipher = Cipher(algorithms.AES(KEY), modes.CBC(iv), backend=default_backend())
    decryptor = cipher.decryptor()
    decrypted_data = decryptor.update(encrypted_data[16:]) + decryptor.finalize()
    # Print the decrypted data (for debugging purposes)
    #print("Decrypted data (hex):", decrypted_data.hex())
    with open(output_filename, 'wb') as outfile:
        outfile.write(decrypted_data)
if __name__ == "__main__":
    # CLI: encrypt or decrypt <input_file> into <output_file>.
    if len(sys.argv) < 3:
        print("Usage: python file_encrypt_decrypt.py <input_file> <output_file>")
        sys.exit(1)
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    if not os.path.exists(input_file):
        print(f"Error: Input file '{input_file}' not found.")
    elif input_file == output_file:
        print("Error: Output file must be different from input file.")
    else:
        # Re-prompt until a valid choice is given: previously ANY answer
        # other than 'e' (including typos) silently fell through to decryption.
        choice = input("Encrypt (e) or Decrypt (d) the file? ").lower()
        while choice not in ('e', 'd'):
            choice = input("Please enter 'e' to encrypt or 'd' to decrypt: ").lower()
        if choice == 'e':
            encrypt_file(input_file, output_file)
            print("Encryption complete.")
        else:
            decrypt_file(input_file, output_file)
            print("Decryption complete.")

View File

@ -0,0 +1,44 @@
import os
import json
def process_folders(root_folder):
    """Collect id/previewPos from every data.json below *root_folder* and
    write them, sorted by id, to output.json."""
    collected = []
    for entry_name in os.listdir(root_folder):
        candidate = os.path.join(root_folder, entry_name)
        if os.path.isdir(candidate):
            process_subfolders(candidate, collected)
    # Sort before writing so output.json is deterministic.
    write_output_file(sort_entries_by_id(collected), root_folder)
def process_subfolders(folder_path, data_entries):
    """Walk *folder_path* recursively and harvest every data.json found."""
    for current_dir, _, filenames in os.walk(folder_path):
        if 'data.json' in filenames:
            process_data_json(os.path.join(current_dir, 'data.json'), data_entries)
def process_data_json(data_json_path, data_entries):
    """Append the id and previewPos of one data.json file to *data_entries*.

    Missing fields default to '' / 0; unreadable files are reported and skipped.
    """
    try:
        with open(data_json_path, 'r', encoding='utf-8') as data_file:
            payload = json.load(data_file)
        data_entries.append({
            'id': payload.get('id', ''),
            'previewPos': payload.get('previewPos', 0),
        })
    except (json.JSONDecodeError, UnicodeDecodeError) as e:
        print(f"Error reading {data_json_path}: {e}")
def sort_entries_by_id(data_entries):
    """Return a new list with *data_entries* ordered by their 'id' field."""
    return sorted(data_entries, key=lambda entry: entry['id'])
def write_output_file(data_entries, root_folder):
    """Dump *data_entries* as pretty-printed JSON to output.json in *root_folder*."""
    destination = os.path.join(root_folder, 'output.json')
    with open(destination, 'w', encoding='utf-8') as output_file:
        json.dump(data_entries, output_file, indent=2)
if __name__ == '__main__':
    # CLI entry point: summarise the id/previewPos of every data.json
    # beneath the current working directory into output.json.
    # Specify the root folder where you want to start processing
    root_folder = '.'  # Current directory where the script is executed
    process_folders(root_folder)

View File

@ -0,0 +1,57 @@
import os
import json
def process_folders(root_folder):
    """Collect the name/subtitle/detail text of every data.json below
    *root_folder* and write them to output_all.json."""
    collected = []
    for entry_name in os.listdir(root_folder):
        candidate = os.path.join(root_folder, entry_name)
        if os.path.isdir(candidate):
            process_subfolders(candidate, collected)
    write_output_file(collected, 'output_all.json', root_folder)
def process_subfolders(folder_path, data_entries):
    """Walk *folder_path* recursively and harvest every data.json found."""
    for current_dir, _, filenames in os.walk(folder_path):
        if 'data.json' in filenames:
            process_data_json(os.path.join(current_dir, 'data.json'), data_entries)
def process_data_json(data_json_path, data_entries):
    """Append the id plus the songName/songSubtitle/songDetail text blocks
    of one data.json to *data_entries*.

    Files that are unreadable or missing any expected field are reported
    and skipped without partial entries.
    """
    text_fields = ("jpText", "jpFont", "enText", "enFont")
    try:
        with open(data_json_path, 'r', encoding='utf-8') as data_file:
            data = json.load(data_file)
        entry = {"id": data["id"]}
        for section in ("songName", "songSubtitle", "songDetail"):
            entry[section] = {field: data[section][field] for field in text_fields}
        data_entries.append(entry)
    except (json.JSONDecodeError, UnicodeDecodeError, KeyError) as e:
        print(f"Error reading {data_json_path}: {e}")
def write_output_file(data_entries, filename, root_folder):
    """Dump *data_entries* as pretty-printed UTF-8 JSON to *filename*
    inside *root_folder* (non-ASCII text is kept readable)."""
    destination = os.path.join(root_folder, filename)
    with open(destination, 'w', encoding='utf-8') as output_file:
        json.dump(data_entries, output_file, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    # CLI entry point: gather the display text of every data.json beneath
    # the current working directory into output_all.json.
    # Specify the root folder where you want to start processing
    root_folder = '.'  # Current directory where the script is executed
    process_folders(root_folder)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,73 @@
import os
import argparse
import subprocess
import shutil
import tempfile
from pydub import AudioSegment
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
def parse_arguments():
    """Parse the CLI arguments: the input audio path and the target song ID."""
    parser = argparse.ArgumentParser(
        description='CLI tool to create .acb files and encrypt them')
    for name, help_text in (('input_audio', 'Path to the input audio file'),
                            ('song_id', 'Song ID')):
        parser.add_argument(name, type=str, help=help_text)
    return parser.parse_args()
def encrypt_file(input_file, output_file, key, iv):
    """AES-CBC encrypt *input_file* into *output_file*, writing the IV first.

    The plaintext is zero-padded up to the next 16-byte block boundary.
    Fix: the previous expression ``16 - len(data) % 16`` appended a full
    block of 16 zero bytes even when the input was already aligned;
    ``(-len(data)) % 16`` adds padding only when it is actually needed.
    """
    with open(input_file, 'rb') as f_in:
        data = f_in.read()
    backend = default_backend()
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
    encryptor = cipher.encryptor()
    # (-len) % 16 is 0 for block-aligned data, otherwise the missing byte count.
    padded_data = data + b'\0' * (-len(data) % 16)
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
    # Write IV followed by encrypted data to output file
    with open(output_file, 'wb') as f_out:
        f_out.write(iv)
        f_out.write(encrypted_data)
def main():
    """Build an encrypted song_<id>.bin from an input audio file.

    Pipeline: resample to 44.1 kHz WAV -> encode to HCA (VGAudioCli.exe) ->
    inject into a template .acb (ACBEditor.exe) -> AES-encrypt with an
    IV-prefixed header -> move song_<id>.bin into the current directory.
    All intermediates live in a throwaway temp directory.
    """
    args = parse_arguments()
    # Generate a unique random temporary folder name
    with tempfile.TemporaryDirectory(prefix='song_') as temp_folder:
        try:
            # Convert input audio to 44100Hz WAV
            input_audio = args.input_audio
            temp_wav_file = os.path.join(temp_folder, f'input_{args.song_id}.wav')
            audio = AudioSegment.from_file(input_audio)
            audio = audio.set_frame_rate(44100)
            audio.export(temp_wav_file, format='wav')
            # Generate .hca file using VGAudioCli.exe
            hca_folder = os.path.join(temp_folder, f'song_{args.song_id}')
            os.makedirs(hca_folder, exist_ok=True)
            hca_file = os.path.join(hca_folder, '00000.hca')
            subprocess.run(['bin/VGAudioCli.exe', temp_wav_file, hca_file], check=True)
            # Copy sample .acb template to temporary location
            acb_template = 'templates/song_sample.acb'
            temp_acb_file = os.path.join(temp_folder, f'song_{args.song_id}.acb')
            shutil.copy(acb_template, temp_acb_file)
            # Edit .acb using ACBEditor (repacks the .hca folder into the .acb)
            subprocess.run(['bin/ACBEditor.exe', hca_folder], check=True)
            # Encrypt .acb file to .bin with IV prepended
            key = bytes.fromhex('54704643596B474170554B6D487A597A')
            iv = bytes([0xFF] * 16)
            encrypted_bin_file = os.path.join(temp_folder, f'song_{args.song_id}.bin')
            encrypt_file(temp_acb_file, encrypted_bin_file, key, iv)
            # Move encrypted .bin file to the root folder
            final_bin_file = f'song_{args.song_id}.bin'
            shutil.move(encrypted_bin_file, final_bin_file)
        except Exception as e:
            print(f"Error: {e}")

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,50 @@
import os
import argparse
import subprocess
import shutil
import tempfile
from pydub import AudioSegment
def parse_arguments():
    """Parse the CLI arguments: the input audio path and the target song ID."""
    parser = argparse.ArgumentParser(description='CLI tool to create .acb files')
    for name, help_text in (('input_audio', 'Path to the input audio file'),
                            ('song_id', 'Song ID')):
        parser.add_argument(name, type=str, help=help_text)
    return parser.parse_args()
def main():
    """Build a song_<id>.acb from an input audio file.

    Pipeline: resample to 44.1 kHz WAV -> encode to HCA (VGAudioCli.exe) ->
    inject into a template .acb (ACBEditor.exe) -> place song_<id>.acb in
    the current directory. All intermediates live in a throwaway temp
    directory.
    """
    args = parse_arguments()
    # Generate a unique random temporary folder name
    with tempfile.TemporaryDirectory(prefix='song_') as temp_folder:
        try:
            # Convert input audio to 44100Hz WAV
            input_audio = args.input_audio
            temp_wav_file = os.path.join(temp_folder, f'input_{args.song_id}.wav')
            audio = AudioSegment.from_file(input_audio)
            audio = audio.set_frame_rate(44100)
            audio.export(temp_wav_file, format='wav')
            # Generate .hca file using VGAudioCli.exe
            hca_folder = os.path.join(temp_folder, f'song_{args.song_id}')
            os.makedirs(hca_folder, exist_ok=True)
            hca_file = os.path.join(hca_folder, '00000.hca')
            subprocess.run(['bin/VGAudioCli.exe', temp_wav_file, hca_file], check=True)
            # Copy sample .acb template to temporary location
            acb_template = 'templates/song_sample.acb'
            temp_acb_file = os.path.join(temp_folder, f'song_{args.song_id}.acb')
            shutil.copy(acb_template, temp_acb_file)
            # Edit .acb using ACBEditor (repacks the .hca folder into the .acb)
            subprocess.run(['bin/ACBEditor.exe', hca_folder], check=True)
            # Move .acb file to the current directory
            final_acb_file = f'song_{args.song_id}.acb'
            os.replace(temp_acb_file, final_acb_file)
        except Exception as e:
            print(f"Error: {e}")

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,49 @@
import subprocess
import os
import sys
import shutil
import tempfile
from pydub import AudioSegment
def convert_audio_to_at9(input_file, output_file):
    """Convert *input_file* to AT9 at 192 kbps using at9tool.exe.

    Non-WAV inputs are first decoded to a temporary WAV with pydub.
    Fix: this function was previously defined twice back-to-back; the dead
    first definition (whose temp folder was never cleaned up) is removed.
    """
    # Create a unique temporary folder to store intermediate files
    temp_folder = tempfile.mkdtemp()
    try:
        # Check if the input file is already in WAV format
        if not input_file.lower().endswith('.wav'):
            # Load the input audio file using pydub and convert to WAV
            temp_wav_file = os.path.join(temp_folder, "temp.wav")
            audio = AudioSegment.from_file(input_file)
            audio.export(temp_wav_file, format="wav")
            input_file = temp_wav_file
        # Path to AT9Tool executable
        at9tool_cli_path = os.path.join("bin", "at9tool.exe")
        # Run at9tool to convert WAV to AT9 at a 192 kbps bitrate
        subprocess.run([at9tool_cli_path, "-e", "-br", "192", input_file, output_file], check=True)
    finally:
        # Clean up temporary folder
        shutil.rmtree(temp_folder, ignore_errors=True)
if __name__ == "__main__":
    # CLI entry point: convert <input_file> to AT9 at <output_file>.
    # Check command-line arguments
    if len(sys.argv) != 3:
        print("Usage: python at9.py <input_file> <output_file>")
        sys.exit(1)
    input_audio_file = sys.argv[1]
    output_audio_file = sys.argv[2]
    try:
        convert_audio_to_at9(input_audio_file, output_audio_file)
        print(f"Conversion successful. Output file: {output_audio_file}")
    except Exception as e:
        # Surface decode/tool failures as a message instead of a traceback.
        print(f"Error during conversion: {e}")

View File

@ -0,0 +1,93 @@
import subprocess
import os
import sys
import shutil
from pydub import AudioSegment
from pydub.exceptions import CouldntDecodeError
def convert_to_mono_48k(input_file, output_file):
    """Convert *input_file* to a 16-bit, mono, 48 kHz WAV at *output_file*.

    Exits the process with status 1 if the input cannot be decoded.
    """
    try:
        converted = (AudioSegment.from_file(input_file)
                     .set_channels(1)        # mono
                     .set_frame_rate(48000)  # 48000 Hz
                     .set_sample_width(2))   # 16-bit (2-byte) samples
        converted.export(output_file, format='wav')
    except CouldntDecodeError:
        print(f"Error: Unable to decode {input_file}. Please provide a valid audio file.")
        sys.exit(1)
def run_encode_tool(input_wav, output_bs):
    """Run the external BNSF encoder on *input_wav*, producing *output_bs*.

    Fix: check=True makes a non-zero exit from encode.exe raise
    CalledProcessError instead of being silently ignored — every other
    subprocess.run() in this toolset already fails fast this way.
    """
    subprocess.run(['bin/encode.exe', '0', input_wav, output_bs, '48000', '14000'], check=True)
def modify_bnsf_template(output_bs, output_bnsf, header_size, total_samples):
    """Modify the BNSF template file with calculated values and combine with output.bs.

    Patches three big-endian 32-bit fields in templates/header.bnsf
    (overall file size at 0x4, sample count at 0x1C, bitstream size at
    0x2C), then appends the raw bitstream and writes the result to
    *output_bnsf*.
    """
    # Calculate the file size of output.bs
    bs_file_size = os.path.getsize(output_bs)
    # Create modified BNSF data
    # File-size field excludes the 8-byte magic+size prefix, hence -0x8.
    new_file_size = bs_file_size + header_size - 0x8
    total_samples_bytes = total_samples.to_bytes(4, 'big')
    bs_file_size_bytes = bs_file_size.to_bytes(4, 'big')
    # Read BNSF template data
    with open('templates/header.bnsf', 'rb') as template_file:
        bnsf_template_data = bytearray(template_file.read())
    # Modify BNSF template with calculated values (all fields big-endian)
    bnsf_template_data[0x4:0x8] = new_file_size.to_bytes(4, 'big')  # File size
    bnsf_template_data[0x1C:0x20] = total_samples_bytes  # Total sample count
    bnsf_template_data[0x2C:0x30] = bs_file_size_bytes  # Size of output.bs
    # Append output.bs data to modified BNSF template
    with open(output_bs, 'rb') as bs_file:
        bs_data = bs_file.read()
    final_bnsf_data = bnsf_template_data + bs_data
    # Write final BNSF file
    with open(output_bnsf, 'wb') as output_file:
        output_file.write(final_bnsf_data)
if __name__ == "__main__":
    # CLI entry point: <input_audio> [<output_bnsf>] -> BNSF file.
    if len(sys.argv) < 2:
        print("Usage: bnsf.py <input_audio> [<output_bnsf>]")
        sys.exit(1)
    input_audio = sys.argv[1]
    output_bnsf = sys.argv[2] if len(sys.argv) > 2 else 'output.bnsf'
    # Create temp folder if it doesn't exist
    temp_folder = 'temp'
    os.makedirs(temp_folder, exist_ok=True)
    # Temporary file paths
    output_wav = os.path.join(temp_folder, 'output_mono.wav')
    output_bs = os.path.join(temp_folder, 'output.bs')
    # Header size (assuming fixed size)
    header_size = 0x30
    try:
        # Step 1: Convert input audio to required format (WAV)
        convert_to_mono_48k(input_audio, output_wav)
        # Step 2: Run external encoding tool
        run_encode_tool(output_wav, output_bs)
        # Step 3: Get sample count from the converted mono WAV
        mono_wav = AudioSegment.from_wav(output_wav)
        total_samples = len(mono_wav.get_array_of_samples())
        # Step 4: Modify BNSF template with calculated values and combine with output.bs
        modify_bnsf_template(output_bs, output_bnsf, header_size, total_samples)
        print("BNSF file created:", output_bnsf)
    finally:
        # Cleanup: Delete temporary files and temp folder
        if os.path.exists(temp_folder):
            shutil.rmtree(temp_folder)

View File

@ -0,0 +1,44 @@
import subprocess
import os
import sys
import shutil
import tempfile
from pydub import AudioSegment
def convert_audio_to_idsp(input_file, output_file):
    """Convert *input_file* to IDSP via VGAudioCli, decoding to WAV first
    when the input is not already a WAV."""
    work_dir = tempfile.mkdtemp()
    try:
        source_path = input_file
        if not source_path.lower().endswith('.wav'):
            # VGAudioCli expects WAV input, so decode anything else with pydub.
            source_path = os.path.join(work_dir, "temp.wav")
            AudioSegment.from_file(input_file).export(source_path, format="wav")
        converter = os.path.join("bin", "VGAudioCli.exe")
        subprocess.run([converter, "-i", source_path, "-o", output_file], check=True)
    finally:
        # Always remove the scratch folder, even on failure.
        shutil.rmtree(work_dir, ignore_errors=True)
if __name__ == "__main__":
    # CLI entry point: convert <input_file> to IDSP at <output_file>.
    # Check command-line arguments
    if len(sys.argv) != 3:
        print("Usage: python idsp.py <input_file> <output_file>")
        sys.exit(1)
    input_audio_file = sys.argv[1]
    output_audio_file = sys.argv[2]
    try:
        convert_audio_to_idsp(input_audio_file, output_audio_file)
        print(f"Conversion successful. Output file: {output_audio_file}")
    except Exception as e:
        # Surface decode/tool failures as a message instead of a traceback.
        print(f"Error during conversion: {e}")

View File

@ -0,0 +1,46 @@
import subprocess
import os
import sys
import shutil
import tempfile
from pydub import AudioSegment
def convert_audio_to_opus(input_file, output_file):
    """Convert *input_file* to a Namco-headered Switch OPUS file via VGAudioCli.

    Non-WAV inputs are decoded and resampled to 48 kHz first.
    """
    work_dir = tempfile.mkdtemp()
    try:
        source_path = input_file
        if not source_path.lower().endswith('.wav'):
            # VGAudioCli expects WAV input; decode and force 48000 Hz.
            source_path = os.path.join(work_dir, "temp.wav")
            audio = AudioSegment.from_file(input_file).set_frame_rate(48000)
            audio.export(source_path, format="wav")
        converter = os.path.join("bin", "VGAudioCli.exe")
        subprocess.run([converter, "-i", source_path, "-o", output_file,
                        "--opusheader", "namco"], check=True)
    finally:
        # Always remove the scratch folder, even on failure.
        shutil.rmtree(work_dir, ignore_errors=True)
if __name__ == "__main__":
    # CLI entry point: convert <input_file> to Switch OPUS at <output_file>.
    # Check command-line arguments
    if len(sys.argv) != 3:
        print("Usage: python opus.py <input_file> <output_file>")
        sys.exit(1)
    input_audio_file = sys.argv[1]
    output_audio_file = sys.argv[2]
    try:
        convert_audio_to_opus(input_audio_file, output_audio_file)
        print(f"Conversion successful. Output file: {output_audio_file}")
    except Exception as e:
        # Surface decode/tool failures as a message instead of a traceback.
        print(f"Error during conversion: {e}")

View File

@ -0,0 +1,62 @@
import json
def merge_wordlists(file1_path, file2_path, output_path):
    """Merge a base wordlist with a generated one and write the result.

    Entries from *file1_path* whose key starts with ``song_`` are dropped
    and CJK-specific fields stripped; entries from *file2_path* get
    English-derived fallbacks for the four EU languages, adjusted English
    font types, and CJK fields stripped, before everything is concatenated
    and saved to *output_path*.
    """
    with open(file1_path, 'r', encoding='utf-8') as handle:
        base_data = json.load(handle)
    with open(file2_path, 'r', encoding='utf-8') as handle:
        new_data = json.load(handle)

    # Fields stripped from the base wordlist entries.
    drop_from_base = {"japaneseText", "chineseTText", "chineseTFontType",
                      "koreanText", "koreanFontType"}
    merged = [
        {key: value for key, value in entry.items() if key not in drop_from_base}
        for entry in base_data['items']
        if not entry['key'].startswith('song_')
    ]

    eu_languages = ['french', 'italian', 'german', 'spanish']
    for entry in new_data['items']:
        # Start every entry at English font type 0; a later pass bumps all
        # non-"song_detail_" entries to 3.
        entry['englishUsFontType'] = 0
        # Fill in any missing EU translations from the English text.
        for language in eu_languages:
            if language + 'Text' not in entry:
                entry[language + 'Text'] = entry['englishUsText']
                entry[language + 'FontType'] = 3
    for entry in new_data['items']:
        if not entry['key'].startswith('song_detail_'):
            entry['englishUsFontType'] = 3

    # Fields stripped from the generated wordlist entries.
    drop_from_new = {"japaneseText", "japaneseFontType", "chineseTText",
                     "chineseTFontType", "koreanText", "koreanFontType"}
    merged.extend(
        {key: value for key, value in entry.items() if key not in drop_from_new}
        for entry in new_data['items']
    )

    base_data['items'] = merged
    with open(output_path, 'w', encoding='utf-8') as handle:
        json.dump(base_data, handle, indent=4, ensure_ascii=False)
    print(f"Merged wordlists saved to '{output_path}'.")
# Example usage:
# NOTE(review): this runs at import time with hard-coded Windows paths, and
# the second input doubles as the output, so that file is rewritten in place.
merge_wordlists('data\\_console\\NX\\datatable\\wordlist.json', 'out\\Data\\NX\\datatable\\wordlist.json', 'out\\Data\\NX\\datatable\\wordlist.json')

View File

@ -0,0 +1,55 @@
import json
def merge_wordlists(file1_path, file2_path, output_path):
    """Merge a base wordlist with a generated one and write the result.

    Entries from *file1_path* whose key starts with ``song_`` are dropped
    and EU-language fields stripped; entries from *file2_path* get their
    English font type adjusted and japaneseFontType removed, before
    everything is concatenated and saved to *output_path*.
    """
    with open(file1_path, 'r', encoding='utf-8') as handle:
        base_data = json.load(handle)
    with open(file2_path, 'r', encoding='utf-8') as handle:
        new_data = json.load(handle)

    # EU-language fields stripped from the base wordlist entries.
    drop_from_base = {"frenchText", "frenchFontType", "italianText", "italianFontType",
                      "germanText", "germanFontType", "spanishText", "spanishFontType"}
    merged = [
        {key: value for key, value in entry.items() if key not in drop_from_base}
        for entry in base_data['items']
        if not entry['key'].startswith('song_')
    ]

    for entry in new_data['items']:
        # Start every entry at English font type 0; the next pass bumps all
        # non-"song_detail_" entries to 3.
        entry['englishUsFontType'] = 0
    for entry in new_data['items']:
        if not entry['key'].startswith('song_detail_'):
            entry['englishUsFontType'] = 3

    # Fields stripped from the generated wordlist entries.
    drop_from_new = {"japaneseFontType"}
    merged.extend(
        {key: value for key, value in entry.items() if key not in drop_from_new}
        for entry in new_data['items']
    )

    base_data['items'] = merged
    with open(output_path, 'w', encoding='utf-8') as handle:
        json.dump(base_data, handle, indent=4, ensure_ascii=False)
    print(f"Merged wordlists saved to '{output_path}'.")
# Example usage:
# NOTE(review): this runs at import time with hard-coded Windows paths, and
# the second input doubles as the output, so that file is rewritten in place.
merge_wordlists('data\\_console\\NX\\datatable\\wordlist.json', 'out\\Data\\NX\\datatable\\wordlist.json', 'out\\Data\\NX\\datatable\\wordlist.json')

View File

@ -0,0 +1,272 @@
import sys
import os
import struct
import random
def generate_random_uint16_hex():
    """Return a random 16-bit value as a zero-padded, uppercase 4-digit hex string."""
    return f"{random.randint(0, 65535):04X}"
def load_template_config():
    """Placeholder for loading template settings from config.toml.

    Template metadata currently lives in hard-coded dictionaries inside
    modify_nus3bank_template(), so this simply returns an empty mapping
    until external configuration is actually needed.
    """
    return {}
def select_template_name(game, output_file):
    """Pick the nus3bank template whose name length matches *output_file*.

    The stem (the path with its extension removed) must be 8-13 characters
    for "nijiiro" or 8-11 for "ps4"/"ns1"/"wiiu3"; the length maps directly
    to song_ABC .. song_ABCDEFGH. Raises ValueError otherwise.
    """
    stem = os.path.splitext(output_file)[0]
    supported_lengths = {
        "nijiiro": range(8, 14),
        "ps4": range(8, 12),
        "ns1": range(8, 12),
        "wiiu3": range(8, 12),
    }
    lengths = supported_lengths.get(game)
    if lengths is not None and len(stem) in lengths:
        # A stem of length 8 maps to "ABC", 9 to "ABCD", ..., 13 to "ABCDEFGH".
        return "song_" + "ABCDEFGH"[:len(stem) - 5]
    raise ValueError("Unsupported game or output file name length.")
def modify_nus3bank_template(game, template_name, audio_file, preview_point, output_file):
    """Build a .nus3bank file from a per-game binary template and an audio file.

    Reads the template for *template_name* from ``templates/<game folder>/``,
    patches it in place (random unique ID where the template defines one,
    audio size, preview point, embedded song-name placeholder), appends the
    raw contents of *audio_file*, and writes the result to *output_file*.

    Args:
        game: Target game key ("nijiiro", "ns1", "ps4" or "wiiu3").
        template_name: Template key, e.g. as returned by select_template_name().
        audio_file: Path to the already-encoded audio payload to embed.
        preview_point: Preview start position in milliseconds (int or numeric
            string; converted with int()).
        output_file: Name of the .nus3bank to create.  Its name (minus the
            ".nus3bank" suffix) replaces the template's song-name placeholder.

    Raises:
        ValueError: If *game* or *template_name* is not supported.
    """
    # Define game-specific template configurations.
    # All offsets are byte positions inside the corresponding template file:
    #   unique_id_offset     - 2-byte slot for a random bank ID (nijiiro only)
    #   audio_size_offsets   - 4-byte little-endian slots holding the audio size
    #   preview_point_offset - 4-byte little-endian slot for the preview time (ms)
    game_templates = {
        "nijiiro": {
            "template_folder": "nijiiro",
            "templates": {
                "song_ABC": {
                    "unique_id_offset": 176,
                    "audio_size_offsets": [76, 1568, 1852],
                    "preview_point_offset": 1724,
                    "song_placeholder": "song_ABC",
                    "template_file": "song_ABC.nus3bank"
                },
                "song_ABCD": {
                    "unique_id_offset": 176,
                    "audio_size_offsets": [76, 1568, 1852],
                    "preview_point_offset": 1724,
                    "song_placeholder": "song_ABCD",
                    "template_file": "song_ABCD.nus3bank"
                },
                "song_ABCDE": {
                    "unique_id_offset": 176,
                    "audio_size_offsets": [76, 1568, 1852],
                    "preview_point_offset": 1724,
                    "song_placeholder": "song_ABCDE",
                    "template_file": "song_ABCDE.nus3bank"
                },
                "song_ABCDEF": {
                    "unique_id_offset": 180,
                    "audio_size_offsets": [76, 1576, 1868],
                    "preview_point_offset": 1732,
                    "song_placeholder": "song_ABCDEF",
                    "template_file": "song_ABCDEF.nus3bank"
                },
                "song_ABCDEFG": {
                    "unique_id_offset": 180,
                    "audio_size_offsets": [76, 1672, 1964],
                    "preview_point_offset": 1824,
                    "song_placeholder": "song_ABCDEFG",
                    "template_file": "song_ABCDEFG.nus3bank"
                },
                "song_ABCDEFGH": {
                    "unique_id_offset": 180,
                    "audio_size_offsets": [76, 1576, 1868],
                    "preview_point_offset": 1732,
                    "song_placeholder": "song_ABCDEFGH",
                    "template_file": "song_ABCDEFGH.nus3bank"
                },
            }
        },
        "ns1": {
            "template_folder": "ns1",
            "templates": {
                "song_ABC": {
                    "audio_size_offsets": [76, 5200, 5420],
                    "preview_point_offset": 5324,
                    "song_placeholder": "SONG_ABC",
                    "template_file": "SONG_ABC.nus3bank"
                },
                "song_ABCD": {
                    "audio_size_offsets": [76, 5200, 5420],
                    "preview_point_offset": 5324,
                    "song_placeholder": "SONG_ABCD",
                    "template_file": "SONG_ABCD.nus3bank"
                },
                "song_ABCDE": {
                    "audio_size_offsets": [76, 5200, 5404],
                    "preview_point_offset": 5320,
                    "song_placeholder": "SONG_ABCDE",
                    "template_file": "SONG_ABCDE.nus3bank"
                },
                "song_ABCDEF": {
                    "audio_size_offsets": [76, 5208, 5420],
                    "preview_point_offset": 5324,
                    "song_placeholder": "SONG_ABCDEF",
                    "template_file": "SONG_ABCDEF.nus3bank"
                }
            }
        },
        "ps4": {
            "template_folder": "ps4",
            "templates": {
                "song_ABC": {
                    "audio_size_offsets": [76, 3220, 3436],
                    "preview_point_offset": 3344,
                    "song_placeholder": "SONG_ABC",
                    "template_file": "SONG_ABC.nus3bank"
                },
                "song_ABCD": {
                    "audio_size_offsets": [76, 3220, 3436],
                    "preview_point_offset": 3344,
                    "song_placeholder": "SONG_ABCD",
                    "template_file": "SONG_ABCD.nus3bank"
                },
                "song_ABCDE": {
                    "audio_size_offsets": [76, 3220, 3436],
                    "preview_point_offset": 3344,
                    "song_placeholder": "SONG_ABCDE",
                    "template_file": "SONG_ABCDE.nus3bank"
                },
                "song_ABCDEF": {
                    "audio_size_offsets": [76, 3228, 3452],
                    "preview_point_offset": 3352,
                    "song_placeholder": "SONG_ABCDEF",
                    "template_file": "SONG_ABCDEF.nus3bank"
                }
            }
        },
        "wiiu3": {
            "template_folder": "wiiu3",
            "templates": {
                "song_ABC": {
                    "audio_size_offsets": [76, 3420, 3612],
                    "preview_point_offset": 3540,
                    "song_placeholder": "SONG_ABC",
                    "template_file": "SONG_ABC.nus3bank"
                },
                "song_ABCD": {
                    "audio_size_offsets": [76, 3420, 3612],
                    "preview_point_offset": 3540,
                    "song_placeholder": "SONG_ABCD",
                    "template_file": "SONG_ABCD.nus3bank"
                },
                "song_ABCDE": {
                    "audio_size_offsets": [76, 3420, 3612],
                    "preview_point_offset": 3540,
                    "song_placeholder": "SONG_ABCDE",
                    "template_file": "SONG_ABCDE.nus3bank"
                },
                "song_ABCDEF": {
                    "audio_size_offsets": [76, 3428, 3612],
                    "preview_point_offset": 3548,
                    "song_placeholder": "SONG_ABCDEF",
                    "template_file": "SONG_ABCDEF.nus3bank"
                }
            }
        },
    }
    if game not in game_templates:
        raise ValueError("Unsupported game.")
    templates_config = game_templates[game]
    if template_name not in templates_config["templates"]:
        raise ValueError(f"Unsupported template for {game}.")
    template_config = templates_config["templates"][template_name]
    template_folder = templates_config["template_folder"]
    # Read template nus3bank file from the specified game's template folder
    # (relative to the current working directory).
    template_file = os.path.join("templates", template_folder, template_config['template_file'])
    with open(template_file, 'rb') as f:
        template_data = bytearray(f.read())
    # Set unique ID if it exists in the template configuration
    # (only the nijiiro templates define one).
    if 'unique_id_offset' in template_config:
        # Generate random UInt16 hex for unique ID
        unique_id_hex = generate_random_uint16_hex()
        # Set unique ID in the template data at the specified offset
        template_data[template_config['unique_id_offset']:template_config['unique_id_offset']+2] = bytes.fromhex(unique_id_hex)
    # Get size of the audio file in bytes
    audio_size = os.path.getsize(audio_file)
    # Convert audio size to UInt32 bytes in little-endian format
    size_bytes = audio_size.to_bytes(4, 'little')
    # Set audio size in the template data at the specified offsets
    for offset in template_config['audio_size_offsets']:
        template_data[offset:offset+4] = size_bytes
    # Convert preview point (milliseconds) to UInt32 bytes in little-endian format
    preview_point_ms = int(preview_point)
    preview_point_bytes = preview_point_ms.to_bytes(4, 'little')
    # Set preview point in the template data at the specified offset
    template_data[template_config['preview_point_offset']:template_config['preview_point_offset']+4] = preview_point_bytes
    # Replace song name placeholder with the output file name in bytes.
    # NOTE(review): output_file is used verbatim here, so it should be a bare
    # file name (no directory components) for the embedded name to be
    # correct — confirm with callers.
    output_file_bytes = output_file.encode('utf-8')
    template_data = template_data.replace(template_config['song_placeholder'].encode('utf-8'), output_file_bytes.replace(b'.nus3bank', b''))
    # Append the audio file contents to the modified template data
    with open(audio_file, 'rb') as audio:
        template_data += audio.read()
    # Write the modified data to the output file
    with open(output_file, 'wb') as out:
        out.write(template_data)
if __name__ == "__main__":
    # Command-line entry point: nus3.py <game> <audio_file> <preview_point> <output_file>
    if len(sys.argv) != 5:
        print("Usage: nus3.py <game> <audio_file> <preview_point> <output_file>")
        sys.exit(1)
    game, audio_file, preview_point, output_file = sys.argv[1:5]
    try:
        template_name = select_template_name(game, output_file)
        modify_nus3bank_template(game, template_name, audio_file, preview_point, output_file)
        print(f"Created {output_file} successfully.")
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)

View File

@ -0,0 +1,48 @@
import json
def merge_wordlists(file1_path, file2_path, output_path):
    """Merge two wordlist JSON files and write the combined result.

    Entries from *file1_path* whose ``key`` starts with ``"song_"`` are
    dropped, and the Latin-American Spanish / Brazilian Portuguese fields are
    stripped from the remaining entries to save space.  Entries from
    *file2_path* keep everything except the Japanese / Traditional Chinese /
    Korean fields.  The surviving first-file entries followed by the filtered
    second-file entries are written to *output_path*.

    Args:
        file1_path: Path of the base wordlist JSON (console datatable).
        file2_path: Path of the wordlist JSON with the new song entries.
        output_path: Where to write the merged wordlist JSON.
    """
    with open(file1_path, 'r', encoding='utf-8') as file1:
        data1 = json.load(file1)
    with open(file2_path, 'r', encoding='utf-8') as file2:
        data2 = json.load(file2)
    # Keys removed from file 1 entries, for space saving reasons. (sorry south americans)
    keys_to_remove_data1 = {"neutralSpanishText", "neutralSpanishFontType",
                            "brazilPortugueseText", "brazilPortugueseFontType"}
    # Drop "song_" entries from file 1 and strip the unwanted keys.
    filtered_items_data1 = [
        {k: v for k, v in item.items() if k not in keys_to_remove_data1}
        for item in data1['items']
        if not item['key'].startswith('song_')
    ]
    # Keys removed from every file 2 entry.
    keys_to_remove_data2 = {"japaneseText", "japaneseFontType", "chineseTText",
                            "chineseTFontType", "koreanText", "koreanFontType"}
    filtered_items_data2 = [
        {k: v for k, v in item.items() if k not in keys_to_remove_data2}
        for item in data2['items']
    ]
    # Concatenate: file 1 entries first, then file 2 entries.
    data1['items'] = filtered_items_data1 + filtered_items_data2
    with open(output_path, 'w', encoding='utf-8') as output_file:
        json.dump(data1, output_file, indent=4, ensure_ascii=False)
    print(f"Merged wordlists saved to '{output_path}'.")
# Example usage: merge the international ORBIS (PS4) datatable wordlist into
# the generated ORBIS wordlist, overwriting the generated file.
merge_wordlists('data\\_console\\ORBIS\\datatableint\\wordlist.json', 'out\\Data\\ORBIS\\datatable\\wordlist.json', 'out\\Data\\ORBIS\\datatable\\wordlist.json')

View File

@ -0,0 +1,47 @@
import json
def merge_wordlists(file1_path, file2_path, output_path):
    """Merge two wordlist JSON files and write the combined result.

    Entries from *file1_path* whose ``key`` starts with ``"song_"`` are
    dropped, and all Western-language fields (French, Italian, German,
    Spanish, neutral Spanish, Brazilian Portuguese) are stripped from the
    remaining entries.  Entries from *file2_path* are passed through (only a
    literal empty-string key would be stripped).  The surviving first-file
    entries followed by the second-file entries are written to *output_path*.

    Args:
        file1_path: Path of the base wordlist JSON (console datatable).
        file2_path: Path of the wordlist JSON with the new song entries.
        output_path: Where to write the merged wordlist JSON.
    """
    with open(file1_path, 'r', encoding='utf-8') as file1:
        data1 = json.load(file1)
    with open(file2_path, 'r', encoding='utf-8') as file2:
        data2 = json.load(file2)
    # Western-language keys removed from file 1 entries, for space saving reasons.
    keys_to_remove_data1 = {"frenchText", "frenchFontType", "italianText", "italianFontType",
                            "germanText", "germanFontType", "spanishText", "spanishFontType",
                            "neutralSpanishText", "neutralSpanishFontType",
                            "brazilPortugueseText", "brazilPortugueseFontType"}
    # Drop "song_" entries from file 1 and strip the unwanted keys.
    filtered_items_data1 = [
        {k: v for k, v in item.items() if k not in keys_to_remove_data1}
        for item in data1['items']
        if not item['key'].startswith('song_')
    ]
    # Keys removed from file 2 entries.  Only the empty-string key is listed,
    # which real entries never use, so this is effectively a pass-through.
    keys_to_remove_data2 = {""}
    filtered_items_data2 = [
        {k: v for k, v in item.items() if k not in keys_to_remove_data2}
        for item in data2['items']
    ]
    # Concatenate: file 1 entries first, then file 2 entries.
    data1['items'] = filtered_items_data1 + filtered_items_data2
    with open(output_path, 'w', encoding='utf-8') as output_file:
        json.dump(data1, output_file, indent=4, ensure_ascii=False)
    print(f"Merged wordlists saved to '{output_path}'.")
# Example usage: merge the Japanese ORBIS (PS4) datatable wordlist into the
# generated ORBIS wordlist, overwriting the generated file.
merge_wordlists('data\\_console\\ORBIS\\datatablejp\\wordlist.json', 'out\\Data\\ORBIS\\datatable\\wordlist.json', 'out\\Data\\ORBIS\\datatable\\wordlist.json')

View File

@ -0,0 +1,41 @@
import json
def merge_wordlists(file1_path, file2_path, output_path):
    """Combine two wordlist JSON files into one and save the result.

    Song entries (keys beginning with "song_") are removed from the first
    file.  Every entry from the second file gets englishUsFontType 3 and, for
    each language missing a translation, a copy of its englishUsText with
    font type 3, before being appended to the surviving first-file entries.
    """
    with open(file1_path, 'r', encoding='utf-8') as first:
        base = json.load(first)
    with open(file2_path, 'r', encoding='utf-8') as second:
        extra = json.load(second)
    # Keep only the non-song entries from the first file.
    merged_items = [entry for entry in base['items'] if not entry['key'].startswith('song_')]
    # Languages that fall back to the English (US) text when absent.
    fallback_languages = ('french', 'italian', 'german', 'spanish', 'chineseT', 'korean',
                          'portuguese', 'russian', 'turkish', 'arabic', 'dutch', 'chineseS')
    for entry in extra['items']:
        entry['englishUsFontType'] = 3
        for language in fallback_languages:
            text_key = language + 'Text'
            if text_key not in entry:
                entry[text_key] = entry['englishUsText']
                entry[language + 'FontType'] = 3
        merged_items.append(entry)
    base['items'] = merged_items
    with open(output_path, 'w', encoding='utf-8') as output_file:
        json.dump(base, output_file, indent=4, ensure_ascii=False)
    print(f"Merged wordlists saved to '{output_path}'.")
# Merge the raw ReadAssets wordlist into the generated one, overwriting the
# generated file.
merge_wordlists('data\\_console\\Raw\\ReadAssets\\wordlist.json', 'out\\Data\\Raw\\ReadAssets\\wordlist.json', 'out\\Data\\Raw\\ReadAssets\\wordlist.json')

View File

@ -0,0 +1,22 @@
import os
import sys
import subprocess
def run_script(script_name, script_args):
    """Run the helper script ``script/<name>/<name>.py`` with the given arguments.

    Args:
        script_name: Name of the helper script (directory and file stem).
        script_args: List of arguments forwarded verbatim to the script.

    Exits with status 1 when the script does not exist.
    """
    script_path = os.path.join('script', script_name, f'{script_name}.py')
    if os.path.exists(script_path):
        # Use the current interpreter rather than whatever "python" resolves
        # to on PATH, so the helper runs under the same Python installation.
        command = [sys.executable, script_path] + script_args
        subprocess.run(command)
    else:
        print(f"Script '{script_name}' not found.")
        sys.exit(1)
if __name__ == "__main__":
    # Require at least a script name; everything after it is forwarded verbatim.
    if len(sys.argv) < 2:
        print("Usage: python launcher.py <script_name> [<script_args>]")
        sys.exit(1)
    run_script(sys.argv[1], sys.argv[2:])

View File

@ -0,0 +1,33 @@
import os
import sys
from pydub import AudioSegment
def convert_audio_to_wav(input_file, output_file):
    """Convert any pydub-readable audio file to WAV.

    A ``.wav`` extension is appended to *output_file* if it does not already
    end with one (case-insensitively).

    Args:
        input_file: Path of the source audio file.
        output_file: Destination path for the WAV file.

    Raises:
        RuntimeError: Wrapping the original error if loading or exporting
            fails (requires ffmpeg/ffprobe for non-WAV inputs).
    """
    try:
        # Load the input audio file using pydub
        audio = AudioSegment.from_file(input_file)
        # Ensure the output file has a .wav extension
        if not output_file.lower().endswith('.wav'):
            output_file += '.wav'
        # Export the audio to WAV format
        audio.export(output_file, format="wav")
    except Exception as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise RuntimeError(f"Error during WAV conversion: {e}") from e
if __name__ == "__main__":
    # Command-line entry: python audio_converter.py <input_file> <output_file>
    if len(sys.argv) != 3:
        print("Usage: python audio_converter.py <input_file> <output_file>")
        sys.exit(1)
    input_audio_file, output_audio_file = sys.argv[1], sys.argv[2]
    try:
        convert_audio_to_wav(input_audio_file, output_audio_file)
        print(f"Conversion successful. Output file: {output_audio_file}")
    except Exception as e:
        print(f"Error during conversion: {e}")

Binary file not shown.

Binary file not shown.