cleanup: float stuff

This commit is contained in:
bnnm 2024-08-31 20:44:08 +02:00
parent 6237a2f7bf
commit f95f5441ca
9 changed files with 169 additions and 123 deletions

View File

@@ -49,10 +49,12 @@ static void prepare_mixing(libvgmstream_priv_t* priv, libvgmstream_options_t* op
vgmstream_mixing_stereo_only(priv->vgmstream, opt->stereo_track - 1);
}
if (priv->cfg.force_pcm16)
if (priv->cfg.force_pcm16) {
mixing_macro_output_sample_format(priv->vgmstream, SFMT_S16);
else if (priv->cfg.force_float)
}
else if (priv->cfg.force_float) {
mixing_macro_output_sample_format(priv->vgmstream, SFMT_FLT);
}
vgmstream_mixing_enable(priv->vgmstream, INTERNAL_BUF_SAMPLES, NULL /*&input_channels*/, NULL /*&output_channels*/);
}
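The added braces are defensive: an unbraced if/else-if chain invites dangling-else mistakes once a branch grows a second statement. A minimal standalone illustration of the braced form (hypothetical names, not vgmstream's actual code):

    #include <stdio.h>

    enum sfmt { SFMT_S16, SFMT_FLT };

    /* hypothetical stand-ins for the real config struct and mixing call */
    struct cfg { int force_pcm16; int force_float; };
    static void set_output_format(enum sfmt f) { printf("format=%d\n", (int)f); }

    static void choose_format(const struct cfg* cfg) {
        /* braced form, as in the commit: each branch stays one unambiguous
           block even if more statements are added later */
        if (cfg->force_pcm16) {
            set_output_format(SFMT_S16);
        }
        else if (cfg->force_float) {
            set_output_format(SFMT_FLT);
        }
    }

    int main(void) {
        struct cfg c = { 0, 1 };
        choose_format(&c); /* prints format=1 (SFMT_FLT) */
        return 0;
    }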

View File

@@ -7,26 +7,22 @@
#include <math.h>
#include <limits.h>
//TODO simplify
/**
* Mixer modifies decoded sample buffer before final output. This is implemented
* mostly with simplicity in mind rather than performance. Process:
* with simplicity in mind rather than performance. Process:
* - detect if mixing applies at current moment or exit (mini performance optimization)
* - copy/upgrade buf to float mixbuf if needed
* - do mixing ops
* - copy/downgrade mixbuf to original buf if needed
*
* Mixing may add or remove channels. input_channels is the buf's original channels,
* and output_channels the resulting buf's channels. buf and mixbuf must be
* as big as max channels (mixing_channels).
*
* Mixing ops are added by a meta (ex. TXTP) or plugin through the API. Non-sensical
* mixes are ignored (to avoid rechecking every time).
*
* Currently, mixing must be manually enabled before starting to decode, because plugins
* need to setup bigger bufs when upmixing. (to be changed)
* Mixing ops are added by a meta (ex. TXTP) or plugins through the API. Nonsensical config
* is ignored on add (to avoid rechecking every time).
*
* segmented/layered layouts handle mixing on their own.
* Mixing may add or remove channels or change sample format. External buf and internal mixbuf
* are expected to be as big as needed. Currently, mixing must be manually enabled before starting
* to decode, because plugins need to set up appropriate bufs. (to be changed)
*
* segmented/layered layouts handle mixing vgmstream sample bufs on their own.
*/
mixer_t* mixer_init(int channels) {
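The process above reduces to a widen/apply/narrow pipeline. A minimal sketch of the two conversion edges, assuming the F32 convention used elsewhere in this commit (floats kept at pcm scale, +-32768.0); names are illustrative, not the real mixer internals:

    #include <stdint.h>
    #include <stddef.h>

    /* upgrade: s16 buf -> float mixbuf, keeping 16-bit scale (+-32768.0) */
    static void upgrade_to_f32(float* mixbuf, const int16_t* buf, size_t n) {
        for (size_t i = 0; i < n; i++)
            mixbuf[i] = (float)buf[i];
    }

    /* downgrade: float mixbuf -> s16 buf, clamping like clamp16 */
    static void downgrade_to_s16(int16_t* buf, const float* mixbuf, size_t n) {
        for (size_t i = 0; i < n; i++) {
            float v = mixbuf[i];
            if (v > 32767.0f) v = 32767.0f;
            else if (v < -32768.0f) v = -32768.0f;
            buf[i] = (int16_t)v;
        }
    }

    /* the mixing ops (volume, channel swaps, fades...) then run on mixbuf
       between these two conversions */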

View File

@@ -8,29 +8,7 @@
#include "mixer_priv.h"
#include "sbuf.h"
//TODO simplify
/**
* Mixer modifies decoded sample buffer before final output. This is implemented
* mostly with simplicity in mind rather than performance. Process:
* - detect if mixing applies at current moment or exit (mini performance optimization)
* - copy/upgrade buf to float mixbuf if needed
* - do mixing ops
* - copy/downgrade mixbuf to original buf if needed
*
* Mixing may add or remove channels. input_channels is the buf's original channels,
* and output_channels the resulting buf's channels. buf and mixbuf must be
* as big as max channels (mixing_channels).
*
* Mixing ops are added by a meta (ex. TXTP) or plugin through the API. Non-sensical
* mixes are ignored (to avoid rechecking every time).
*
* Currently, mixing must be manually enabled before starting to decode, because plugins
* need to setup bigger bufs when upmixing. (to be changed)
*
* segmented/layered layouts handle mixing on their own.
*/
/* ******************************************************************* */
/* Wrapper/helpers for vgmstream's "mixer", which does main sample buffer transformations */
static int32_t get_current_pos(VGMSTREAM* vgmstream, int32_t sample_count) {
int32_t current_pos;
@@ -166,6 +144,7 @@ void mixing_info(VGMSTREAM* vgmstream, int* p_input_channels, int* p_output_chan
sfmt_t mixing_get_input_sample_type(VGMSTREAM* vgmstream) {
// TODO: check vgmstream
// TODO: on layered/segments, detect biggest value and use that (ex. if one of the layers uses flt > flt)
return SFMT_S16;
}
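The second TODO hints at resolving layered/segmented formats to the "biggest" one. A sketch of that selection, reusing sfmt_get_sample_size from sbuf.h (the helper name is hypothetical):

    static sfmt_t sfmt_widest(sfmt_t a, sfmt_t b) {
        /* prefer the format with the larger sample size (f32/flt over s16) */
        return (sfmt_get_sample_size(a) >= sfmt_get_sample_size(b)) ? a : b;
    }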

View File

@@ -137,7 +137,7 @@ int render_layout(sbuf_t* sbuf, VGMSTREAM* vgmstream) {
render_vgmstream_blocked(buf, sample_count, vgmstream);
break;
case layout_segmented:
render_vgmstream_segmented(buf, sample_count,vgmstream);
render_vgmstream_segmented(sbuf, vgmstream);
break;
case layout_layered:
render_vgmstream_layered(sbuf, vgmstream);

View File

@@ -42,7 +42,6 @@ int sfmt_get_sample_size(sfmt_t fmt) {
}
}
#if 0
void* sbuf_get_filled_buf(sbuf_t* sbuf) {
int sample_size = sfmt_get_sample_size(sbuf->fmt);
@@ -50,7 +49,6 @@ void* sbuf_get_filled_buf(sbuf_t* sbuf) {
buf += sbuf->filled * sbuf->channels * sample_size;
return buf;
}
#endif
void sbuf_consume(sbuf_t* sbuf, int count) {
int sample_size = sfmt_get_sample_size(sbuf->fmt);
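The visible body is truncated; per the sbuf.h comment ("move buf by samples amount... will lose base buf pointer") it presumably continues by advancing the pointer in byte units. A sketch of the likely remainder, under that assumption:

    void sbuf_consume_sketch(sbuf_t* sbuf, int count) {
        int sample_size = sfmt_get_sample_size(sbuf->fmt);
        uint8_t* buf = sbuf->buf;
        /* skip count frames; the original base pointer is not kept */
        sbuf->buf = buf + count * sbuf->channels * sample_size;
    }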
@@ -159,79 +157,146 @@ void sbuf_copy_from_f32(sbuf_t* sbuf, float* src) {
}
}
void sbuf_copy_segments(sample_t* dst, int dst_channels, sample_t* src, int src_channels, int samples_to_do, int samples_filled) {
int pos = samples_filled * dst_channels;
if (src_channels == dst_channels) { /* most common and probably faster */
for (int s = 0; s < samples_to_do * dst_channels; s++) {
dst[pos + s] = src[s];
}
/* ugly thing to avoid repeating functions */
#define sbuf_copy_segments_internal(dst, src, src_pos, dst_pos, src_max) \
while (src_pos < src_max) { \
dst[dst_pos++] = src[src_pos++]; \
}
else {
for (int s = 0; s < samples_to_do; s++) {
for (int ch = 0; ch < src_channels; ch++) {
dst[pos + s * dst_channels + ch] = src[s * src_channels + ch];
}
for (int ch = src_channels; ch < dst_channels; ch++) {
dst[pos + s * dst_channels + ch] = 0;
#define sbuf_copy_segments_internal_s16(dst, src, src_pos, dst_pos, src_max, value) \
while (src_pos < src_max) { \
dst[dst_pos++] = clamp16(float_to_int(src[src_pos++] * value)); \
}
#define sbuf_copy_segments_internal_flt(dst, src, src_pos, dst_pos, src_max, value) \
while (src_pos < src_max) { \
dst[dst_pos++] = float_to_int(src[src_pos++] * value); \
}
void sbuf_copy_segments(sbuf_t* sdst, sbuf_t* ssrc) {
/* uncommon so probably fine albeit slower-ish, 0'd other channels first */
if (ssrc->channels != sdst->channels) {
sbuf_silence_part(sdst, sdst->filled, ssrc->filled);
sbuf_copy_layers(sdst, ssrc, 0, ssrc->filled);
#if 0
// "faster" but lots of extra ifs, not worth it
while (src_pos < src_max) {
for (int ch = 0; ch < dst_channels; ch++) {
dst[dst_pos++] = ch >= src_channels ? 0 : src[src_pos++];
}
}
#endif
return;
}
int src_pos = 0;
int dst_pos = sdst->filled * sdst->channels;
int src_max = ssrc->filled * ssrc->channels;
// define all possible combos, probably there is a better way to handle this but...
if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_S16) {
int16_t* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_segments_internal(dst, src, src_pos, dst_pos, src_max);
}
else if (sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_S16) {
float* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_segments_internal(dst, src, src_pos, dst_pos, src_max);
}
else if ((sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_F32) || (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_FLT)) {
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_segments_internal(dst, src, src_pos, dst_pos, src_max);
}
// to s16
else if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_F32) {
int16_t* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_segments_internal_s16(dst, src, src_pos, dst_pos, src_max, 1.0f);
}
else if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_FLT) {
int16_t* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_segments_internal_s16(dst, src, src_pos, dst_pos, src_max, 32768.0f);
}
// to f32
else if (sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_FLT) {
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_segments_internal_flt(dst, src, src_pos, dst_pos, src_max, 32768.0f);
}
// to flt
else if (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_S16) {
float* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_segments_internal_flt(dst, src, src_pos, dst_pos, src_max, (1/32768.0f));
}
else if (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_F32) {
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_segments_internal_flt(dst, src, src_pos, dst_pos, src_max, (1/32768.0f));
}
}
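The multipliers encode the two float conventions named in the comments: F32 carries floats at pcm scale (+-32768.0) while FLT carries +-1.0. Hence F32 to S16 multiplies by 1.0f and clamps, FLT to S16 and FLT to F32 by 32768.0f, and S16/F32 to FLT by 1/32768.0f. A quick round-trip check:

    #include <stdio.h>

    int main(void) {
        short s16 = -32768;
        float flt = s16 * (1/32768.0f);  /* s16 -> FLT: -1.0 */
        float f32 = flt * 32768.0f;      /* FLT -> F32: -32768.0 */
        short back = (short)f32;         /* F32 -> s16: already in range */
        printf("%f %f %d\n", flt, f32, back); /* -1.000000 -32768.000000 -32768 */
        return 0;
    }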
//TODO fix missing ->channels
/* ugly thing to avoid repeating functions */
// dst_channels == src_channels isn't likely so ignore that unlikely optimization
// sometimes one layer has less samples than others and need to 0-fill rest
#define sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, expected, dst_ch_step) \
for (int s = 0; s < ssrc->filled; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
#define sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step) \
for (int s = 0; s < src_filled; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = src[src_pos++]; \
} \
dst_pos += dst_ch_step; \
} \
\
for (int s = ssrc->filled; s < expected; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
for (int s = src_filled; s < dst_expected; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = 0; \
} \
dst_pos += dst_ch_step; \
}
// float +-1.0 <> pcm +-32768.0
#define sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, expected, dst_ch_step, value) \
for (int s = 0; s < ssrc->filled; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
dst[dst_pos++] = float_to_int(src[src_pos++] * value); \
#define sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, value) \
for (int s = 0; s < src_filled; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = clamp16(float_to_int(src[src_pos++] * value)); \
} \
dst_pos += dst_ch_step; \
} \
\
for (int s = ssrc->filled; s < expected; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
for (int s = src_filled; s < dst_expected; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = 0; \
} \
dst_pos += dst_ch_step; \
}
// float +-1.0 <> pcm +-32768.0
#define sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, expected, dst_ch_step, value) \
for (int s = 0; s < ssrc->filled; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
#define sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, value) \
for (int s = 0; s < src_filled; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = float_to_int(src[src_pos++] * value); \
} \
dst_pos += dst_ch_step; \
} \
\
for (int s = ssrc->filled; s < expected; s++) { \
for (int src_ch = 0; src_ch < ssrc->channels; src_ch++) { \
for (int s = src_filled; s < dst_expected; s++) { \
for (int src_ch = 0; src_ch < src_channels; src_ch++) { \
dst[dst_pos++] = 0; \
} \
dst_pos += dst_ch_step; \
}
/* copy interleaving */
void sbuf_copy_layers(sbuf_t* sdst, sbuf_t* ssrc, int dst_ch_start, int expected) {
/* copy interleaving: dst ch1 ch2 ch3 ch4 w/ src ch1 ch2 ch1 ch2 = only fill dst ch1 ch2 */
// dst_channels == src_channels isn't likely so ignore that optimization
// sometimes one layer has fewer samples than others and the rest needs 0-filling
void sbuf_copy_layers(sbuf_t* sdst, sbuf_t* ssrc, int dst_ch_start, int dst_expected) {
int src_filled = ssrc->filled;
int src_channels = ssrc->channels;
int dst_ch_step = (sdst->channels - ssrc->channels);
int src_pos = 0;
int dst_pos = sdst->filled * sdst->channels + dst_ch_start;
@@ -242,56 +307,48 @@ void sbuf_copy_layers(sbuf_t* sdst, sbuf_t* ssrc, int dst_ch_start, int expected
if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_S16) {
int16_t* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, expected, dst_ch_step);
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step);
}
else if (sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_S16) {
float* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, expected, dst_ch_step);
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step);
}
else if ((sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_F32) || (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_FLT)) {
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, expected, dst_ch_step);
sbuf_copy_layers_internal(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step);
}
// to s16
else if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_F32) {
int16_t* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, expected, dst_ch_step, 1.0f);
sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, 1.0f);
}
else if (sdst->fmt == SFMT_S16 && ssrc->fmt == SFMT_FLT) {
int16_t* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, expected, dst_ch_step, 32768.0f);
sbuf_copy_layers_internal_s16(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, 32768.0f);
}
// to f32
else if (sdst->fmt == SFMT_F32 && ssrc->fmt == SFMT_FLT) {
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, expected, dst_ch_step, 32768.0f);
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, 32768.0f);
}
// to flt
else if (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_S16) {
float* dst = sdst->buf;
int16_t* src = ssrc->buf;
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, expected, dst_ch_step, (1/32768.0f));
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, (1/32768.0f));
}
else if (sdst->fmt == SFMT_FLT && ssrc->fmt == SFMT_F32) {
int16_t* dst = sdst->buf;
float* dst = sdst->buf;
float* src = ssrc->buf;
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, expected, dst_ch_step, (1/32768.0f));
sbuf_copy_layers_internal_flt(dst, src, src_pos, dst_pos, src_filled, dst_expected, src_channels, dst_ch_step, (1/32768.0f));
}
}
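A usage sketch of the interleave described above: two stereo layers filling a 4ch destination, with the shorter layer 0-padded. Buffers and counts are hypothetical; sbuf_init's signature is taken from its use in segmented.c below:

    sbuf_t sdst, layer1, layer2;
    sbuf_init(&sdst, SFMT_S16, out_buf, 512, 4);  /* dst: 512 frames, 4ch */
    sbuf_init(&layer1, SFMT_S16, l1_buf, 512, 2);
    sbuf_init(&layer2, SFMT_S16, l2_buf, 512, 2);
    layer1.filled = 512;
    layer2.filled = 400;  /* shorter layer: frames 400..511 get 0-filled */

    sbuf_copy_layers(&sdst, &layer1, 0, 512);  /* writes dst ch 0-1 */
    sbuf_copy_layers(&sdst, &layer2, 2, 512);  /* writes dst ch 2-3 */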
bool sbuf_realloc(sample_t** dst, int samples, int channels) {
sample_t* outbuf_re = realloc(*dst, samples * channels * sizeof(sample_t));
if (!outbuf_re) return false;
*dst = outbuf_re;
return true;
}
void sbuf_silence_s16(sample_t* dst, int samples, int channels, int filled) {
memset(dst + filled * channels, 0, (samples - filled) * channels * sizeof(sample_t));
}

View File

@@ -33,19 +33,16 @@ void sbuf_init_f32(sbuf_t* sbuf, float* buf, int samples, int channels);
int sfmt_get_sample_size(sfmt_t fmt);
//void* sbuf_get_filled_buf(sbuf_t* sbuf);
//void sbuf_clamp(sbuf_t* sbuf, int samples);
void* sbuf_get_filled_buf(sbuf_t* sbuf);
/* move buf by samples amount to simplify some code (will lose base buf pointer) */
void sbuf_consume(sbuf_t* sbuf, int count);
// TODO decide if using float 1.0 style or 32767 style (fuzzy PCM changes when doing that)
/* helpers to copy between buffers; note they assume dst and src aren't the same buf */
void sbuf_copy_to_f32(float* dst, sbuf_t* sbuf);
void sbuf_copy_from_f32(sbuf_t* sbuf, float* src);
void sbuf_copy_segments(sample_t* dst, int dst_channels, sample_t* src, int src_channels, int samples_to_do, int samples_filled);
void sbuf_copy_segments(sbuf_t* sdst, sbuf_t* ssrc);
void sbuf_copy_layers(sbuf_t* sdst, sbuf_t* ssrc, int dst_ch_start, int expected);
bool sbuf_realloc(sample_t** dst, int samples, int channels);
void sbuf_silence_s16(sample_t* dst, int samples, int channels, int filled);
void sbuf_silence_rest(sbuf_t* sbuf);
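With the struct-based signatures above, callers describe each buffer once instead of threading channel/sample counts through every call. A hypothetical use of the new sbuf_copy_segments (field handling inferred from sbuf.c and segmented.c in this commit):

    sbuf_t sdst, ssrc;
    sbuf_init(&sdst, SFMT_FLT, dst_buf, 4096, 2);  /* dst: +-1.0 floats */
    sbuf_init(&ssrc, SFMT_S16, src_buf, 1024, 2);  /* src: s16 */
    ssrc.filled = 1024;

    sbuf_copy_segments(&sdst, &ssrc);  /* appends at sdst.filled, converting
                                          s16 -> float via 1/32768.0f */
    sdst.filled += ssrc.filled;        /* caller advances filled, as segmented.c does */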

View File

@@ -36,7 +36,7 @@ void render_vgmstream_layered(sbuf_t* sdst, VGMSTREAM* vgmstream) {
if (samples_to_do <= 0) { /* when decoding more than num_samples */
VGM_LOG_ONCE("LAYERED: wrong samples_to_do\n");
break;
goto decode_fail;
}
/* decode all layers */
@@ -58,6 +58,8 @@ void render_vgmstream_layered(sbuf_t* sdst, VGMSTREAM* vgmstream) {
vgmstream->samples_into_block += samples_to_do;
}
return;
decode_fail:
sbuf_silence_rest(sdst);
}
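sbuf_silence_rest presumably zeroes whatever the failed decode left unwritten, so callers get silence rather than stale samples. A sketch assuming it generalizes sbuf_silence_s16's arithmetic to any sample size:

    #include <string.h>

    void sbuf_silence_rest_sketch(sbuf_t* sbuf) {
        int sample_size = sfmt_get_sample_size(sbuf->fmt);
        uint8_t* buf = sbuf->buf;
        memset(buf + sbuf->filled * sbuf->channels * sample_size, 0,
               (sbuf->samples - sbuf->filled) * sbuf->channels * sample_size);
    }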

View File

@@ -25,7 +25,7 @@ typedef struct {
bool mixed_channels; /* segments have different number of channels */
} segmented_layout_data;
void render_vgmstream_segmented(sample_t* buffer, int32_t sample_count, VGMSTREAM* vgmstream);
void render_vgmstream_segmented(sbuf_t* sbuf, VGMSTREAM* vgmstream);
segmented_layout_data* init_layout_segmented(int segment_count);
bool setup_layout_segmented(segmented_layout_data* data);
void free_layout_segmented(segmented_layout_data* data);

View File

@@ -4,6 +4,7 @@
#include "../base/mixing.h"
#include "../base/plugins.h"
#include "../base/sbuf.h"
#include "../base/render.h"
#define VGMSTREAM_MAX_SEGMENTS 1024
#define VGMSTREAM_SEGMENT_SAMPLE_BUFFER 8192
@@ -12,18 +13,15 @@
/* Decodes samples for segmented streams.
* Chains together sequential vgmstreams, for data divided into separate sections or files
* (like one part for the intro and another for loop segments, which may even use different codecs). */
void render_vgmstream_segmented(sample_t* outbuf, int32_t sample_count, VGMSTREAM* vgmstream) {
void render_vgmstream_segmented(sbuf_t* sbuf, VGMSTREAM* vgmstream) {
segmented_layout_data* data = vgmstream->layout_data;
bool use_internal_buffer = false;
sbuf_t ssrc_tmp;
sbuf_t* ssrc = &ssrc_tmp;
/* normally uses outbuf directly (faster?) but could need internal buffer if downmixing */
if (vgmstream->channels != data->input_channels || data->mixed_channels) {
use_internal_buffer = true;
}
if (data->current_segment >= data->segment_count) {
VGM_LOG_ONCE("SEGMENT: wrong current segment\n");
sbuf_silence_s16(outbuf, sample_count, data->output_channels, 0);
sbuf_silence_rest(sbuf);
return;
}
@@ -31,10 +29,10 @@ void render_vgmstream_segmented(sample_t* outbuf, int32_t sample_count, VGMSTREA
mixing_info(data->segments[data->current_segment], NULL, &current_channels);
int samples_this_block = vgmstream_get_samples(data->segments[data->current_segment]);
int samples_filled = 0;
while (samples_filled < sample_count) {
while (sbuf->filled < sbuf->samples) {
int samples_to_do;
sample_t* buf;
sfmt_t segment_format;
void* buf_filled = NULL;
if (vgmstream->loop_flag && decode_do_loop(vgmstream)) {
/* handle looping (loop_layout has been called below, changes segments/state) */
@@ -62,9 +60,9 @@ void render_vgmstream_segmented(sample_t* outbuf, int32_t sample_count, VGMSTREA
}
samples_to_do = decode_get_samples_to_do(samples_this_block, sample_count, vgmstream);
if (samples_to_do > sample_count - samples_filled)
samples_to_do = sample_count - samples_filled;
samples_to_do = decode_get_samples_to_do(samples_this_block, sbuf->samples, vgmstream);
if (samples_to_do > sbuf->samples - sbuf->filled)
samples_to_do = sbuf->samples - sbuf->filled;
if (samples_to_do > VGMSTREAM_SEGMENT_SAMPLE_BUFFER /*&& use_internal_buffer*/) /* always for fade/etc mixes */
samples_to_do = VGMSTREAM_SEGMENT_SAMPLE_BUFFER;
@@ -73,25 +71,33 @@ void render_vgmstream_segmented(sample_t* outbuf, int32_t sample_count, VGMSTREA
goto decode_fail;
}
buf = use_internal_buffer ? data->buffer : &outbuf[samples_filled * data->output_channels];
render_vgmstream(buf, samples_to_do, data->segments[data->current_segment]);
segment_format = mixing_get_input_sample_type(data->segments[data->current_segment]);
sbuf_init(ssrc, segment_format, data->buffer, samples_to_do, data->segments[data->current_segment]->channels);
if (use_internal_buffer) {
sbuf_copy_segments(outbuf, data->output_channels, data->buffer, current_channels, samples_to_do, samples_filled);
// try to use part of outbuf directly if not remixed (mini optimization) //TODO improve detection
if (vgmstream->channels == data->input_channels && sbuf->fmt == segment_format && !data->mixed_channels) {
buf_filled = sbuf_get_filled_buf(sbuf);
ssrc->buf = buf_filled;
}
samples_filled += samples_to_do;
render_main(ssrc, data->segments[data->current_segment]);
// returned buf may have changed
if (ssrc->buf != buf_filled) {
sbuf_copy_segments(sbuf, ssrc);
}
sbuf->filled += samples_to_do;
vgmstream->current_sample += samples_to_do;
vgmstream->samples_into_block += samples_to_do;
}
return;
decode_fail:
sbuf_silence_s16(outbuf, sample_count, data->output_channels, samples_filled);
sbuf_silence_rest(sbuf);
}
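The two clamps in the loop above bound each pass first by the space left in the external buffer, then by the 8192-frame internal buffer. A worked example with assumed numbers:

    /* e.g. sbuf->samples = 10000, sbuf->filled = 9500, segment has 20000 left */
    int samples_to_do = 20000;           /* from decode_get_samples_to_do */
    if (samples_to_do > 10000 - 9500)
        samples_to_do = 10000 - 9500;    /* 500: external space wins */
    if (samples_to_do > 8192)            /* VGMSTREAM_SEGMENT_SAMPLE_BUFFER */
        samples_to_do = 8192;            /* would only trigger for larger requests */
    /* result: samples_to_do == 500 */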
void seek_layout_segmented(VGMSTREAM* vgmstream, int32_t seek_sample) {
segmented_layout_data* data = vgmstream->layout_data;
@@ -145,8 +151,10 @@ fail:
}
bool setup_layout_segmented(segmented_layout_data* data) {
int max_input_channels = 0, max_output_channels = 0, mixed_channels = 0;
int max_input_channels = 0;
int max_output_channels = 0;
int max_sample_size = 0;
bool mixed_channels = false;
/* setup each VGMSTREAM (roughly equivalent to vgmstream.c's init_vgmstream_internal stuff) */
for (int i = 0; i < data->segment_count; i++) {
@@ -187,7 +195,7 @@ bool setup_layout_segmented(segmented_layout_data* data) {
mixing_info(data->segments[i-1], NULL, &prev_output_channels);
if (segment_output_channels != prev_output_channels) {
mixed_channels = 1;
mixed_channels = true;
//VGM_LOG("SEGMENTED: segment %i has wrong channels %i vs prev channels %i\n", i, segment_output_channels, prev_output_channels);
//goto fail;
}
@@ -202,6 +210,10 @@ bool setup_layout_segmented(segmented_layout_data* data) {
// goto fail;
}
int current_sample_size = sfmt_get_sample_size( mixing_get_input_sample_type(data->segments[i]) );
if (max_sample_size < current_sample_size)
max_sample_size = current_sample_size;
/* init mixing */
mixing_setup(data->segments[i], VGMSTREAM_SEGMENT_SAMPLE_BUFFER);
@@ -213,8 +225,9 @@ bool setup_layout_segmented(segmented_layout_data* data) {
return false;
/* create internal buffer big enough for mixing */
if (!sbuf_realloc(&data->buffer, VGMSTREAM_SEGMENT_SAMPLE_BUFFER, max_input_channels))
goto fail;
free(data->buffer);
data->buffer = malloc(VGMSTREAM_SEGMENT_SAMPLE_BUFFER * max_input_channels * max_sample_size);
if (!data->buffer) goto fail;
data->input_channels = max_input_channels;
data->output_channels = max_output_channels;
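A worked size for the realloc-to-malloc change: with 8 input channels and at least one float segment (max_sample_size = 4), the internal buffer becomes 8192 * 8 * 4 = 262144 bytes (256 KiB); the old sbuf_realloc path sized by sizeof(sample_t) (s16, 2 bytes) and would have allocated half that for float segments.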