1
0
Mirror of upstream repository, synced 2025-02-20 20:50:59 +01:00

Very primitive 3D transform support. Animations using cameras/3D now display with the proper perspective, but there are lots of issues.

This commit is contained in:
Jennifer Taylor 2021-08-03 17:04:20 +00:00
parent 52300c40b7
commit b9b85bf146
8 changed files with 314 additions and 22 deletions

View File

@ -1,9 +1,11 @@
try:
# If we compiled the faster cython/c++ code, we can use it instead!
from .blendcpp import affine_composite
from .blendcpp import perspective_composite
except ImportError:
# If we didn't, then fall back to the pure python implementation.
from .blend import affine_composite
from .blend import perspective_composite
__all__ = ["affine_composite"]
__all__ = ["affine_composite", "perspective_composite"]

View File

@ -549,3 +549,65 @@ def pixel_renderer(
# Blend it.
texoff = (texx + (texy * texwidth)) * 4
return blend_point(add_color, mult_color, texbytes[texoff:(texoff + 4)], imgbytes[imgoff:(imgoff + 4)], blendfunc)
def perspective_composite(
    img: Image.Image,
    add_color: Color,
    mult_color: Color,
    transform: Matrix,
    camera: Point,
    focal_length: float,
    mask: Optional[Image.Image],
    blendfunc: int,
    texture: Image.Image,
    single_threaded: bool = False,
    enable_aa: bool = True,
) -> Image.Image:
    """
    Composite a texture onto an image using a 3D transform with a simple
    perspective projection, returning a new image with the result.

    Each texel of ``texture`` is pushed through ``transform``, projected
    toward ``camera`` using ``focal_length``, and blended onto ``img``
    with ``blendfunc`` after applying ``add_color``/``mult_color``.
    Pixels that project outside ``img``, or whose mask byte is zero, are
    skipped.

    Note that ``single_threaded`` and ``enable_aa`` are accepted for
    signature parity with the accelerated implementation but are not
    used by this pure-python fallback (its body never references them).
    """
    # Warn if we have an unsupported blend.
    if blendfunc not in {0, 1, 2, 3, 8, 9, 70, 256, 257}:
        print(f"WARNING: Unsupported blend {blendfunc}")
        return img

    # These are calculated properties and caching them outside of the loop
    # speeds things up a bit.
    imgwidth = img.width
    imgheight = img.height
    texwidth = texture.width
    texheight = texture.height

    # Get the data in an easier to manipulate and faster to update fashion.
    imgbytes = bytearray(img.tobytes('raw', 'RGBA'))
    texbytes = texture.tobytes('raw', 'RGBA')

    if mask:
        # Only the alpha channel of the mask matters for clipping.
        alpha = mask.split()[-1]
        maskbytes = alpha.tobytes('raw', 'L')
    else:
        maskbytes = None

    for texy in range(texheight):
        for texx in range(texwidth):
            # Calculate perspective projection.
            imgloc = transform.multiply_point(Point(texx, texy))
            depth = imgloc.z - camera.z
            if depth == 0.0:
                # The transformed point lies exactly on the camera plane;
                # projecting it would divide by zero, so skip it instead
                # of crashing the whole composite.
                continue
            perspective = focal_length / depth
            imgx = int(((imgloc.x - camera.x) * perspective) + camera.x)
            imgy = int(((imgloc.y - camera.y) * perspective) + camera.y)

            # Check clipping.
            if imgx < 0 or imgx >= imgwidth:
                continue
            if imgy < 0 or imgy >= imgheight:
                continue

            # Check mask rectangle.
            maskoff = imgx + (imgy * imgwidth)
            imgoff = maskoff * 4
            if maskbytes is not None and maskbytes[maskoff] == 0:
                continue

            # Blend it.
            texoff = (texx + (texy * texwidth)) * 4
            imgbytes[imgoff:(imgoff + 4)] = blend_point(add_color, mult_color, texbytes[texoff:(texoff + 4)], imgbytes[imgoff:(imgoff + 4)], blendfunc)

    img = Image.frombytes('RGBA', (imgwidth, imgheight), bytes(imgbytes))
    return img

View File

@ -1,7 +1,7 @@
from PIL import Image # type: ignore
from typing import Optional
from ..types import Color, Matrix
from ..types import Color, Point, Matrix
def affine_composite(
@ -16,3 +16,19 @@ def affine_composite(
enable_aa: bool = ...,
) -> Image.Image:
...
def perspective_composite(
    img: Image.Image,
    add_color: Color,
    mult_color: Color,
    transform: Matrix,
    camera: Point,
    focal_length: float,
    mask: Optional[Image.Image],
    blendfunc: int,
    texture: Image.Image,
    single_threaded: bool = ...,
    enable_aa: bool = ...,
) -> Image.Image:
    """Type stub for the compiled perspective compositing entry point.

    Mirrors the signature of the pure-python fallback so both the cython
    and python implementations present the same interface to callers.
    """
    ...

View File

@ -44,6 +44,26 @@ cdef extern int affine_composite_fast(
unsigned int enable_aa,
)
# Declaration of the C entry point for the fast perspective compositing
# path, implemented in the companion C++ source.  The python-level classes
# (Color, Matrix, Point) are flattened here into C structs and scalar
# camera/focal-length arguments.  Returns nonzero on error.
cdef extern int perspective_composite_fast(
    unsigned char *imgbytes,
    unsigned char *maskbytes,
    unsigned int imgwidth,
    unsigned int imgheight,
    float camera_x,
    float camera_y,
    float camera_z,
    float focal_length,
    floatcolor_t add_color,
    floatcolor_t mult_color,
    matrix_t transform,
    int blendfunc,
    unsigned char *texbytes,
    unsigned int texwidth,
    unsigned int texheight,
    unsigned int threads,
    unsigned int enable_aa
)
def affine_composite(
img: Image.Image,
add_color: Color,
@ -144,3 +164,82 @@ def affine_composite(
# first this function appears to return None.
img = Image.frombytes('RGBA', (imgwidth, imgheight), imgbytes)
return img
def perspective_composite(
    img: Image.Image,
    add_color: Color,
    mult_color: Color,
    transform: Matrix,
    camera: Point,
    focal_length: float,
    mask: Optional[Image.Image],
    blendfunc: int,
    texture: Image.Image,
    single_threaded: bool = False,
    enable_aa: bool = True,
) -> Image.Image:
    """
    Composite a texture onto an image using a 3D transform with perspective
    projection, delegating the per-pixel work to the C++ fast path.

    This wrapper marshals the python-level Color/Matrix/Point objects into
    C structs, hands raw RGBA buffers to perspective_composite_fast(), and
    rebuilds a PIL image from the blitted result.
    """
    # Warn if we have an unsupported blend; bail out untouched.
    if blendfunc not in {0, 1, 2, 3, 8, 9, 70, 256, 257}:
        print(f"WARNING: Unsupported blend {blendfunc}")
        return img
    # These are calculated properties and caching them outside of the loop
    # speeds things up a bit.
    imgwidth = img.width
    imgheight = img.height
    texwidth = texture.width
    texheight = texture.height
    # Grab the raw image data.
    # NOTE(review): imgbytes is a python bytes object whose buffer the C++
    # side appears to write into in place — confirm this is intentional and
    # safe with the Cython buffer handoff.
    imgbytes = img.tobytes('raw', 'RGBA')
    texbytes = texture.tobytes('raw', 'RGBA')
    # Grab the mask data.  maskdata must stay referenced in a python local
    # so the buffer behind the maskbytes pointer is kept alive for the call.
    if mask is not None:
        alpha = mask.split()[-1]
        maskdata = alpha.tobytes('raw', 'L')
    else:
        maskdata = None
    cdef unsigned char *maskbytes = NULL
    if maskdata is not None:
        maskbytes = maskdata
    # Convert classes to C structs.
    cdef floatcolor_t c_addcolor = floatcolor_t(r=add_color.r, g=add_color.g, b=add_color.b, a=add_color.a)
    cdef floatcolor_t c_multcolor = floatcolor_t(r=mult_color.r, g=mult_color.g, b=mult_color.b, a=mult_color.a)
    cdef matrix_t c_transform = matrix_t(
        a11=transform.a11, a12=transform.a12, a13=transform.a13,
        a21=transform.a21, a22=transform.a22, a23=transform.a23,
        a31=transform.a31, a32=transform.a32, a33=transform.a33,
        a41=transform.a41, a42=transform.a42, a43=transform.a43,
    )
    # Thread count requested of the C++ side; one thread when the caller
    # asked for single-threaded operation.
    cdef unsigned int threads = 1 if single_threaded else multiprocessing.cpu_count()
    # Call the C++ function.
    errors = perspective_composite_fast(
        imgbytes,
        maskbytes,
        imgwidth,
        imgheight,
        camera.x,
        camera.y,
        camera.z,
        focal_length,
        c_addcolor,
        c_multcolor,
        c_transform,
        blendfunc,
        texbytes,
        texwidth,
        texheight,
        threads,
        1 if enable_aa else 0,
    )
    if errors != 0:
        raise Exception("Error raised in C++!")
    # We blitted in-place, return that. There seems to be a reference bug in Cython
    # when called from compiled mypyc code, so if we don't assign to a local variable
    # first this function appears to return None.
    img = Image.frombytes('RGBA', (imgwidth, imgheight), imgbytes)
    return img

View File

@ -584,4 +584,58 @@ extern "C"
return 0;
}
// Composite a texture onto an image using a full 3D transform with a
// simple perspective projection.  Each texel is transformed, projected
// toward the camera point using focal_length, clipped against the image
// bounds and optional mask, and blended in place via blend_point().
// Returns 0 on success.
//
// NOTE: the threads and enable_aa parameters are currently unused — this
// implementation is single-threaded and does no anti-aliasing; they are
// accepted so the signature matches affine_composite_fast.
int perspective_composite_fast(
    unsigned char *imgbytes,
    unsigned char *maskbytes,
    unsigned int imgwidth,
    unsigned int imgheight,
    float camera_x,
    float camera_y,
    float camera_z,
    float focal_length,
    floatcolor_t add_color,
    floatcolor_t mult_color,
    matrix_t transform,
    int blendfunc,
    unsigned char *texbytes,
    unsigned int texwidth,
    unsigned int texheight,
    unsigned int threads,
    unsigned int enable_aa
) {
    // Cast to a usable type.
    intcolor_t *imgdata = (intcolor_t *)imgbytes;
    intcolor_t *texdata = (intcolor_t *)texbytes;

    for (unsigned int texy = 0; texy < texheight; texy++) {
        for (unsigned int texx = 0; texx < texwidth; texx++) {
            // Calculate perspective projection.
            point_t imgloc = transform.multiply_point((point_t){(float)texx, (float)texy});
            float depth = imgloc.z - camera_z;
            if (depth == 0.0) {
                // The point lies exactly on the camera plane; projecting
                // it would divide by zero and produce inf/NaN coordinates,
                // so skip it.
                continue;
            }
            float perspective = focal_length / depth;
            int imgx = ((imgloc.x - camera_x) * perspective) + camera_x;
            int imgy = ((imgloc.y - camera_y) * perspective) + camera_y;

            // Check clipping.
            if (imgx < 0 || imgx >= (int)imgwidth) {
                continue;
            }
            if (imgy < 0 || imgy >= (int)imgheight) {
                continue;
            }

            // Check mask rectangle.
            unsigned int imgoff = imgx + (imgy * imgwidth);
            if (maskbytes != NULL && maskbytes[imgoff] == 0) {
                continue;
            }

            // Blend it.
            unsigned int texoff = (texx + (texy * texwidth));
            imgdata[imgoff] = blend_point(add_color, mult_color, texdata[texoff], imgdata[imgoff], blendfunc);
        }
    }

    return 0;
}
}

View File

@ -1,7 +1,7 @@
from typing import Any, Dict, Generator, List, Set, Tuple, Optional, Union
from PIL import Image # type: ignore
from .blend import affine_composite
from .blend import affine_composite, perspective_composite
from .swf import (
SWF,
Frame,
@ -453,6 +453,7 @@ class AFPRenderer(VerboseOutput):
# Internal render parameters.
self.__registered_objects: Dict[int, Union[RegisteredShape, RegisteredClip, RegisteredImage, RegisteredDummy]] = {}
self.__root: Optional[PlacedClip] = None
self.__camera: Optional[AP2PlaceCameraTag] = None
# List of imports that we provide stub implementations for.
self.__stubbed_swfs: Set[str] = {
@ -975,7 +976,8 @@ class AFPRenderer(VerboseOutput):
return None, False
elif isinstance(tag, AP2PlaceCameraTag):
print("WARNING: Unhandled PLACE_CAMERA tag!")
self.vprint(f"{prefix} Place camera tag.")
self.__camera = tag
# Didn't place a new clip.
return None, False
@ -1135,7 +1137,32 @@ class AFPRenderer(VerboseOutput):
texture = shape.rectangle
if texture is not None:
img = affine_composite(img, add_color, mult_color, transform, mask, blend, texture, single_threaded=self.__single_threaded, enable_aa=self.__enable_aa)
if transform.is_affine or self.__camera is None:
img = affine_composite(
img,
add_color,
mult_color,
transform,
mask,
blend,
texture,
single_threaded=self.__single_threaded,
enable_aa=self.__enable_aa,
)
else:
img = perspective_composite(
img,
add_color,
mult_color,
transform,
self.__camera.center,
self.__camera.focal_length,
mask,
blend,
texture,
single_threaded=self.__single_threaded,
enable_aa=self.__enable_aa,
)
elif isinstance(renderable, PlacedImage):
if only_depths is not None and renderable.depth not in only_depths:
# Not on the correct depth plane.
@ -1143,7 +1170,32 @@ class AFPRenderer(VerboseOutput):
# This is a shape draw reference.
texture = self.textures[renderable.source.reference]
img = affine_composite(img, add_color, mult_color, transform, mask, blend, texture, single_threaded=self.__single_threaded, enable_aa=self.__enable_aa)
if transform.is_affine or self.__camera is None:
img = affine_composite(
img,
add_color,
mult_color,
transform,
mask,
blend,
texture,
single_threaded=self.__single_threaded,
enable_aa=self.__enable_aa,
)
else:
img = perspective_composite(
img,
add_color,
mult_color,
transform,
self.__camera.center,
self.__camera.focal_length,
mask,
blend,
texture,
single_threaded=self.__single_threaded,
enable_aa=self.__enable_aa,
)
elif isinstance(renderable, PlacedDummy):
# Nothing to do!
pass
@ -1482,7 +1534,16 @@ class AFPRenderer(VerboseOutput):
# Now, render out the placed objects.
color = swf.color or Color(0.0, 0.0, 0.0, 0.0)
curimage = Image.new("RGBA", (resized_width, resized_height), color=color.as_tuple())
curimage = self.__render_object(curimage, root_clip, movie_transform, movie_mask, actual_mult_color, actual_add_color, actual_blend, only_depths=only_depths)
curimage = self.__render_object(
curimage,
root_clip,
movie_transform,
movie_mask,
actual_mult_color,
actual_add_color,
actual_blend,
only_depths=only_depths,
)
else:
# Nothing changed, make a copy of the previous render.
self.vprint(" Using previous frame render")

View File

@ -1455,17 +1455,15 @@ class SWF(TrackedCoverage, VerboseOutput):
running_pointer += 36
floats = [x / 1024.0 for x in ints]
if False:
# TODO: Start actually doing perspective projections when needed.
transform.a11 = floats[0]
transform.a12 = floats[1]
transform.a13 = floats[2]
transform.a21 = floats[3]
transform.a22 = floats[4]
transform.a23 = floats[5]
transform.a31 = floats[6]
transform.a32 = floats[7]
transform.a33 = floats[8]
transform.a11 = floats[0]
transform.a12 = floats[1]
transform.a13 = floats[2]
transform.a21 = floats[3]
transform.a22 = floats[4]
transform.a23 = floats[5]
transform.a31 = floats[6]
transform.a32 = floats[7]
transform.a33 = floats[8]
self.vprint(f"{prefix} 3D Transform Matrix: {', '.join(str(f) for f in floats)}")

View File

@ -183,7 +183,7 @@ class Matrix:
)
@property
def __is_affine(self) -> bool:
def is_affine(self) -> bool:
return (
round(abs(self.a13), 5) == 0.0 and
round(abs(self.a23), 5) == 0.0 and
@ -194,7 +194,7 @@ class Matrix:
)
def as_dict(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
if self.__is_affine:
if self.is_affine:
return {
'a': self.a,
'b': self.b,
@ -235,7 +235,7 @@ class Matrix:
a43=self.a43,
)
if not other.__is_affine:
if not other.is_affine:
new.a11 = other.a11
new.a12 = other.a12
new.a13 = other.a13
@ -483,7 +483,7 @@ class Matrix:
)
def __repr__(self) -> str:
if self.__is_affine:
if self.is_affine:
return f"a: {round(self.a, 5)}, b: {round(self.b, 5)}, c: {round(self.c, 5)}, d: {round(self.d, 5)}, tx: {round(self.tx, 5)}, ty: {round(self.ty, 5)}"
else:
return "; ".join([