import hashlib
import io
import os
import struct

from PIL import Image
from typing import Callable, Dict, List, Optional, Tuple

from bemani.format.dxt import DXTBuffer
from bemani.protocol.binary import BinaryEncoding
from bemani.protocol.xml import XmlEncoding
from bemani.protocol.lz77 import Lz77
from bemani.protocol.node import Node


class IFS:
    """
    Best-effort utility for decoding the `.ifs` file format. There are better tools out
    there, but this was developed before their existence. This should work with most of
    the games out there, including non-rhythm games that use this format.
    """

    def __init__(
        self,
        data: bytes,
        decode_binxml: bool = False,
        decode_textures: bool = False,
        keep_hex_names: bool = False,
        reference_loader: Optional[Callable[[str], Optional["IFS"]]] = None,
    ) -> None:
        self.__files: Dict[str, bytes] = {}
        self.__formats: Dict[str, str] = {}
        self.__compressed: Dict[str, bool] = {}
        self.__imgsize: Dict[str, Tuple[int, int, int, int]] = {}
        self.__uvsize: Dict[str, Tuple[int, int, int, int]] = {}
        self.__decode_binxml = decode_binxml
        self.__keep_hex_names = keep_hex_names
        self.__decode_textures = decode_textures
        self.__loader = reference_loader
        self.__parse_file(data)

    def __fix_name(self, filename: str) -> str:
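        # Manifest filenames are escaped: a leading "_" guards names that start
        # with a digit, "_E" stands in for "." and "__" for "_". For example, a
        # (hypothetical) manifest entry "_2dx_Etex" decodes to "2dx.tex".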
        if filename[0] == "_" and filename[1].isdigit():
            filename = filename[1:]
        filename = filename.replace("_E", ".")
        filename = filename.replace("__", "_")
        return filename

    def __parse_file(self, data: bytes) -> None:
        # Grab the magic values and make sure this is an IFS
        (
            signature,
            version,
            version_crc,
            pack_time,
            unpacked_header_size,
            data_index,
        ) = struct.unpack(
            ">IHHIII",
            data[0:20],
        )
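        # The 20 bytes just unpacked are big-endian: a 4-byte magic, a 2-byte
        # version, a 2-byte version CRC (the version XORed with 0xFFFF), a
        # 4-byte pack timestamp, the unpacked header size, and the offset at
        # which file data begins.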
        if signature != 0x6CAD8F89:
            raise Exception("Invalid IFS file!")
        if version ^ version_crc != 0xFFFF:
            raise Exception("Corrupt version in IFS file!")

        if version == 1:
            # No header MD5
            header_offset = 20
        else:
            # Make room for header MD5, at byte offset 20-36
            header_offset = 36

        # First, try as binary
        benc = BinaryEncoding()
        header = benc.decode(data[header_offset:data_index])

        if header is None:
            # Now, try as XML
            xenc = XmlEncoding()
            header = xenc.decode(
                b'<?xml encoding="ascii"?>'
                + data[header_offset:data_index].split(b"\0")[0]
            )

        if header is None:
            raise Exception("Invalid IFS file!")

        files: Dict[str, Tuple[int, int, int, Optional[str]]] = {}

        if header.name != "imgfs":
            raise Exception("Unknown IFS format!")

        # Grab any super-files that this file might reference.
        header_md5: Optional[int] = None
        header_size: Optional[int] = None
        supers: List[Tuple[str, bytes]] = [("__INVALID__", b"")]

        for child in header.children:
            if child.name == "_info_":
                header_md5 = child.child_value("md5")  # NOQA
                header_size = child.child_value("size")  # NOQA
            elif child.name == "_super_":
                super_name = child.value
                super_md5 = child.child_value("md5")
                if not isinstance(super_name, str) or not isinstance(super_md5, bytes):
                    raise Exception(f"Super definition {child} has invalid data!")
                supers.append((super_name, super_md5))

        def get_children(parent: str, node: Node) -> None:
            real_name = self.__fix_name(node.name)
            if node.data_type == "3s32":
                node_name = os.path.join(parent, real_name).replace(
                    f"{os.sep}imgfs{os.sep}", ""
                )
                ref = None
                for subnode in node.children:
                    if subnode.name == "i":
                        super_ref = subnode.value
                        if super_ref > 0 and super_ref < len(supers):
                            ref = supers[super_ref][0]
                        else:
                            ref = supers[0][0]
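
                # Each leaf node is a "3s32" triple: (offset relative to
                # data_index, size, pack timestamp). ref, if set, names the
                # super IFS file that actually holds the data.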
                files[node_name] = (
                    node.value[0] + data_index,
                    node.value[1],
                    node.value[2],
                    ref,
                )
            else:
                for subchild in node.children:
                    get_children(os.path.join(parent, f"{real_name}{os.sep}"), subchild)

        # Recursively walk the entire filesystem extracting files and their locations.
        get_children(os.sep, header)

        # Cache of other file data.
        otherdata: Dict[str, IFS] = {}

        for fn in files:
            (start, size, pack_time, external_file) = files[fn]
            if external_file is not None:
                if external_file not in otherdata:
                    if self.__loader is None:
                        ifsdata = None
                    else:
                        ifsdata = self.__loader(external_file)

                    if ifsdata is None:
                        raise Exception(
                            f"Couldn't extract file data for {fn} referencing IFS file {external_file}!"
                        )
                    else:
                        otherdata[external_file] = ifsdata

                if fn in otherdata[external_file].filenames:
                    filedata = otherdata[external_file].read_file(fn)
                else:
                    raise Exception(f"{fn} not found in {external_file} IFS!")
            else:
                filedata = data[start : (start + size)]
                if len(filedata) != size:
                    raise Exception(f"Couldn't extract file data for {fn}!")
            self.__files[fn] = filedata

        # Now, find all of the index files that are available.
        for filename in list(self.__files.keys()):
            abs_filename = (os.sep if filename.startswith(os.sep) else "") + filename

            if abs_filename.endswith(f"{os.sep}texturelist.xml"):
                # This is a texture index.
                texdir = os.path.dirname(filename)

                benc = BinaryEncoding()
                texdata = benc.decode(self.__files[filename])

                if texdata is None:
                    # Now, try as XML
                    xenc = XmlEncoding()
                    encoding = "ascii"
                    texdata = xenc.decode(
                        b'<?xml encoding="ascii"?>' + self.__files[filename]
                    )

                    if texdata is None:
                        continue
                else:
                    if benc.encoding is None:
                        raise Exception(
                            "Logic error, expected an encoding from binary decoder!"
                        )
                    encoding = benc.encoding

                if texdata.name != "texturelist":
                    raise Exception(f"Unexpected name {texdata.name} in texture list!")

                if texdata.attribute("compress") == "avslz":
                    compressed = True
                else:
                    compressed = False

                for child in texdata.children:
                    if child.name != "texture":
                        continue

                    textfmt = child.attribute("format")
                    if textfmt is None:
                        raise Exception(f"Texture {child} has no texture format!")

                    for subchild in child.children:
                        if subchild.name != "image":
                            continue
                        name = subchild.attribute("name")
                        if name is None:
                            raise Exception(f"Texture entry {subchild} has no name!")
                        if " " in name:
                            # Certain files that were corrupted on create or copy
                            # seem to have spaces in the name which shouldn't be
                            # allowed. Lob them off.
                            name = name[: name.find(" ")]
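                        # Archive entries are stored under the MD5 hex digest
                        # of the texture name; rename them back to the
                        # human-readable name.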
                        md5sum = hashlib.md5(name.encode(encoding)).hexdigest()
                        oldname = os.path.join(texdir, md5sum)
                        newname = os.path.join(texdir, name)

                        if oldname in self.__files:
                            supported = False
                            if self.__decode_textures:
                                if textfmt in ["argb8888rev", "dxt5"]:
                                    # This is a supported file to decode
                                    newname += ".png"
                                    supported = True

                            # Remove old index, update file to new index.
                            self.__files[newname] = self.__files[oldname]
                            if not self.__keep_hex_names:
                                del self.__files[oldname]

                            # Remember the attributes for this file so we can extract it later.
                            self.__compressed[newname] = compressed

                            if supported:
                                # Only pop down the format and sizes if we support extracting.
                                self.__formats[newname] = textfmt
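
                                # imgrect/uvrect appear to be stored in
                                # half-pixel units, so halve them to get pixel
                                # coordinates (left, right, top, bottom).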
                                rect = subchild.child_value("imgrect")
                                if rect is not None:
                                    self.__imgsize[newname] = (
                                        rect[0] // 2,
                                        rect[1] // 2,
                                        rect[2] // 2,
                                        rect[3] // 2,
                                    )
                                rect = subchild.child_value("uvrect")
                                if rect is not None:
                                    self.__uvsize[newname] = (
                                        rect[0] // 2,
                                        rect[1] // 2,
                                        rect[2] // 2,
                                        rect[3] // 2,
                                    )
            elif abs_filename.endswith(f"{os.sep}afplist.xml"):
                # This is an AFP animation index.
                afpdir = os.path.dirname(filename)
                bsidir = os.path.join(afpdir, "bsi")
                geodir = os.path.join(os.path.dirname(afpdir), "geo")
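
                # afp entries sit alongside the list itself, same-named bsi
                # entries in a "bsi" subdirectory, and shape geometry in a
                # sibling "geo" directory.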

                benc = BinaryEncoding()
                afpdata = benc.decode(self.__files[filename])

                if afpdata is None:
                    # Now, try as XML
                    xenc = XmlEncoding()
                    encoding = "ascii"
                    afpdata = xenc.decode(
                        b'<?xml encoding="ascii"?>' + self.__files[filename]
                    )

                    if afpdata is None:
                        continue
                else:
                    if benc.encoding is None:
                        raise Exception(
                            "Logic error, expected an encoding from binary decoder!"
                        )
                    encoding = benc.encoding

                if afpdata.name != "afplist":
                    raise Exception(f"Unexpected name {afpdata.name} in afp list!")

                for child in afpdata.children:
                    if child.name != "afp":
                        continue

                    # First, fix up the afp files themselves.
                    name = child.attribute("name")
                    if name is None:
                        raise Exception(f"AFP entry {child} has no name!")
                    md5sum = hashlib.md5(name.encode(encoding)).hexdigest()

                    for fixdir in [afpdir, bsidir]:
                        oldname = os.path.join(fixdir, md5sum)
                        newname = os.path.join(fixdir, name)

                        if oldname in self.__files:
                            # Remove old index, update file to new index.
                            self.__files[newname] = self.__files[oldname]
                            if not self.__keep_hex_names:
                                del self.__files[oldname]

                    # Now, fix up the shape files as well.
                    geodata = child.child_value("geo")
                    if geodata is not None:
                        for geoid in geodata:
                            geoname = f"{name}_shape{geoid}"
                            md5sum = hashlib.md5(geoname.encode(encoding)).hexdigest()

                            oldname = os.path.join(geodir, md5sum)
                            newname = os.path.join(geodir, geoname)

                            if oldname in self.__files:
                                # Remove old index, update file to new index.
                                self.__files[newname] = self.__files[oldname]
                                if not self.__keep_hex_names:
                                    del self.__files[oldname]

    @property
    def filenames(self) -> List[str]:
        return [f for f in self.__files]

    def read_file(self, filename: str) -> bytes:
        # First, figure out if this file is stored compressed or not. If it is,
        # decompress it so that we have the raw data available to us.
        decompress = self.__compressed.get(filename, False)
        filedata = self.__files[filename]
        if decompress:
            uncompressed_size, compressed_size = struct.unpack(">II", filedata[0:8])
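            # Compressed files carry an 8-byte prologue of two big-endian
            # sizes. If the payload length matches the declared compressed
            # size, the remainder is an LZ77 stream; otherwise the data is
            # passed through with the prologue rotated to the end.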
            if len(filedata) == compressed_size + 8:
                lz77 = Lz77()
                filedata = lz77.decompress(filedata[8:])
            else:
                filedata = filedata[8:] + filedata[0:8]

        if self.__decode_binxml and os.path.splitext(filename)[1] == ".xml":
            benc = BinaryEncoding()
            filexml = benc.decode(filedata)
            if filexml is not None:
                filedata = str(filexml).encode("utf-8")

        if (
            self.__decode_textures
            and filename in self.__formats
            and filename in self.__imgsize
            and filename in self.__uvsize
        ):
            fmt = self.__formats[filename]
            img = self.__imgsize[filename]
            crop = self.__uvsize[filename]

            # Decode the image data itself.
            width = img[1] - img[0]
            height = img[3] - img[2]

            if fmt == "argb8888rev":
                if len(filedata) < (width * height * 4):
                    left = (width * height * 4) - len(filedata)
                    filedata = filedata + b"\x00" * left
                png = Image.frombytes("RGBA", (width, height), filedata, "raw", "BGRA")
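                # Crop out the UV rectangle; PIL's crop box is (left, upper,
                # right, lower) relative to the image origin.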
                png = png.crop(
                    (
                        crop[0] - img[0],
                        crop[2] - img[2],
                        crop[1] - img[0],
                        crop[3] - img[2],
                    )
                )
                b = io.BytesIO()
                png.save(b, format="PNG")
                filedata = b.getvalue()
            elif fmt == "dxt5":
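                # DXT5 textures are block-compressed; expand them to raw RGBA
                # before cropping out the UV rectangle.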
                dxt = DXTBuffer(width, height)
                png = Image.frombuffer(
                    "RGBA",
                    (width, height),
                    dxt.DXT5Decompress(filedata, swap=True),
                    "raw",
                    "RGBA",
                    0,
                    1,
                )
                png = png.crop(
                    (
                        crop[0] - img[0],
                        crop[2] - img[2],
                        crop[1] - img[0],
                        crop[3] - img[2],
                    )
                )
                b = io.BytesIO()
                png.save(b, format="PNG")
                filedata = b.getvalue()

        return filedata