Add stereo support to Wii U software mixer

This commit is contained in:
Clownacy 2020-04-19 19:27:02 +01:00
parent 288c2dccee
commit 9e3b158973

View file

@@ -26,9 +26,9 @@ static unsigned long ticks_per_second;
static OSMutex sound_list_mutex; static OSMutex sound_list_mutex;
static OSMutex organya_mutex; static OSMutex organya_mutex;
static AXVoice *voice; static AXVoice *voices[2];
static short *stream_buffer; static short *stream_buffers[2];
static float *stream_buffer_float; static float *stream_buffer_float;
static size_t buffer_length; static size_t buffer_length;
static unsigned long output_frequency; static unsigned long output_frequency;
@@ -100,9 +100,11 @@ static void FrameCallback(void)
unsigned int half; unsigned int half;
// Just assume both voices are in-sync
AXVoiceOffsets offsets; AXVoiceOffsets offsets;
AXGetVoiceOffsets(voice, &offsets); AXGetVoiceOffsets(voices[0], &offsets);
if (offsets.currentOffset > (buffer_length / 2)) if (offsets.currentOffset > (buffer_length / 2))
{ {
@@ -125,17 +127,21 @@ static void FrameCallback(void)
for (unsigned int i = 0; i < buffer_length / 2; ++i) for (unsigned int i = 0; i < buffer_length / 2; ++i)
{ {
float sample = stream_buffer_float[i * 2]; for (unsigned int j = 0; j < 2; ++j)
{
float sample = stream_buffer_float[(i * 2) + j];
if (sample < -1.0f) if (sample < -1.0f)
sample = -1.0f; sample = -1.0f;
else if (sample > 1.0f) else if (sample > 1.0f)
sample = 1.0f; sample = 1.0f;
stream_buffer[((buffer_length / 2) * last_half) + i] = sample * 32767.0f; stream_buffers[j][((buffer_length / 2) * last_half) + i] = sample * 32767.0f;
}
} }
DCStoreRange(&stream_buffer[(buffer_length / 2) * last_half], buffer_length / 2 * sizeof(short)); DCStoreRange(&stream_buffers[0][(buffer_length / 2) * last_half], buffer_length / 2 * sizeof(short));
DCStoreRange(&stream_buffers[1][(buffer_length / 2) * last_half], buffer_length / 2 * sizeof(short));
last_half = half; last_half = half;
} }
@@ -160,49 +166,78 @@ bool AudioBackend_Init(void)
Mixer_Init(output_frequency); Mixer_Init(output_frequency);
voice = AXAcquireVoice(31, NULL, NULL); buffer_length = output_frequency / 100; // 10ms buffer
if (voice != NULL) stream_buffer_float = (float*)malloc(buffer_length * sizeof(float) * 2);
if (stream_buffer_float != NULL)
{ {
AXVoiceBegin(voice); stream_buffers[0] = (short*)malloc(buffer_length * sizeof(short));
AXSetVoiceType(voice, 0); if (stream_buffers[0] != NULL)
{
stream_buffers[1] = (short*)malloc(buffer_length * sizeof(short));
AXVoiceVeData vol = {.volume = 0x8000}; if (stream_buffers[1] != NULL)
AXSetVoiceVe(voice, &vol); {
voices[0] = AXAcquireVoice(31, NULL, NULL);
AXVoiceDeviceMixData mix_data[6]; if (voices[0] != NULL)
memset(mix_data, 0, sizeof(mix_data)); {
mix_data[0].bus[0].volume = 0x8000; voices[1] = AXAcquireVoice(31, NULL, NULL);
mix_data[1].bus[0].volume = 0x8000;
AXSetVoiceDeviceMix(voice, AX_DEVICE_TYPE_DRC, 0, mix_data); if (voices[1] != NULL)
AXSetVoiceDeviceMix(voice, AX_DEVICE_TYPE_TV, 0, mix_data); {
for (unsigned int i = 0; i < 2; ++i)
{
AXVoiceBegin(voices[i]);
AXSetVoiceSrcRatio(voice, 1.0f); // We use the native sample rate AXSetVoiceType(voices[i], 0);
AXSetVoiceSrcType(voice, AX_VOICE_SRC_TYPE_NONE);
buffer_length = output_frequency / 100; // 10ms buffer AXVoiceVeData vol = {.volume = 0x8000};
AXSetVoiceVe(voices[i], &vol);
stream_buffer = (short*)malloc(buffer_length * sizeof(short)); AXVoiceDeviceMixData mix_data[6];
stream_buffer_float = (float*)malloc(buffer_length * sizeof(float) * 2); memset(mix_data, 0, sizeof(mix_data));
mix_data[0].bus[0].volume = i == 0 ? 0x8000 : 0;
mix_data[1].bus[0].volume = i == 1 ? 0x8000 : 0;
AXVoiceOffsets offs; AXSetVoiceDeviceMix(voices[i], AX_DEVICE_TYPE_DRC, 0, mix_data);
offs.dataType = AX_VOICE_FORMAT_LPCM16; AXSetVoiceDeviceMix(voices[i], AX_DEVICE_TYPE_TV, 0, mix_data);
offs.endOffset = buffer_length;
offs.loopingEnabled = AX_VOICE_LOOP_ENABLED;
offs.loopOffset = 0;
offs.currentOffset = 0;
offs.data = stream_buffer;
AXSetVoiceOffsets(voice, &offs);
AXSetVoiceState(voice, AX_VOICE_STATE_PLAYING); AXSetVoiceSrcRatio(voices[i], 1.0f); // We use the native sample rate
AXSetVoiceSrcType(voices[i], AX_VOICE_SRC_TYPE_NONE);
AXVoiceEnd(voice);
AXRegisterAppFrameCallback(FrameCallback); AXVoiceOffsets offs;
offs.dataType = AX_VOICE_FORMAT_LPCM16;
offs.endOffset = buffer_length;
offs.loopingEnabled = AX_VOICE_LOOP_ENABLED;
offs.loopOffset = 0;
offs.currentOffset = 0;
offs.data = stream_buffers[i];
AXSetVoiceOffsets(voices[i], &offs);
return true; AXSetVoiceState(voices[i], AX_VOICE_STATE_PLAYING);
AXVoiceEnd(voices[i]);
}
AXRegisterAppFrameCallback(FrameCallback);
return true;
}
AXFreeVoice(voices[0]);
}
free(stream_buffers[1]);
}
free(stream_buffers[0]);
}
free(stream_buffer_float);
} }
return false; return false;
@@ -210,7 +245,11 @@ bool AudioBackend_Init(void)
void AudioBackend_Deinit(void) void AudioBackend_Deinit(void)
{ {
AXFreeVoice(voice); for (unsigned int i = 0; i < 2; ++i)
{
AXFreeVoice(voices[i]);
free(stream_buffers[i]);
}
AXQuit(); AXQuit();
} }