Start atlas
util/audio.c
@@ -6,21 +6,93 @@
 AudioData audioData;
 
 uint16_t getAvailableChannel() {
     for (uint16_t i = 0; i < NUM_SYNTH_VOICES; i++) {
         if (audioData.synthVoices[i].volume == 0) {
             return i;
         }
     }
     return -1;
 }
 
+// Helper: compute left/right gains from a pan value in [-1..+1].
+// pan = -1.0 -> full left  (L=1, R=0)
+// pan = +1.0 -> full right (L=0, R=1)
+// pan =  0.0 -> center     (L=R=1, or 1/sqrt(2) ~= 0.707 to avoid clipping)
+static void compute_stereo_gains(float pan, float *outL, float *outR) {
+    // Simple linear panning (no constant-power law).
+    // If you prefer constant-power, you could do:
+    //   float angle = (pan + 1.0f) * (M_PI / 4.0f);
+    //   *outL = cosf(angle);
+    //   *outR = sinf(angle);
+    //
+    // Here we'll just do linear:
+    pan = fmaxf(-1.0f, fminf(+1.0f, pan));
+    if (pan <= 0.0f) {
+        *outL = 1.0f;
+        *outR = 1.0f + pan; // pan is negative, so R < 1
+    } else {
+        *outL = 1.0f - pan; // pan is positive, so L < 1
+        *outR = 1.0f;
+    }
+    // Optionally, scale both down so we never exceed 1.0f / sqrt(2),
+    // e.g. *outL *= 0.7071f; *outR *= 0.7071f;
+}
+
+// This callback now writes stereo frames: interleaved L/R floats.
 void audio_callback(void *userdata, Uint8 *stream, int len) {
     AudioData *audio = (AudioData *) userdata;
-    int samples = len / sizeof(float);
-
-    for (int i = 0; i < samples; i++) {
-        float mix = 0.0f;
-        int activeVoices = 0;
-
-        for (int v = 0; v < NUM_SYNTH_VOICES; v++) {
-            SynthVoice *voice = &audio->synthVoices[v];
-            if (voice->volume == 0 || voice->frequency == 0) continue;
+    // 'len' is total bytes; each sample frame is 2 floats (L+R), i.e. 2 * sizeof(float).
+    int frames = len / (2 * sizeof(float));
+
+    // Zero out the entire output buffer (silence); we'll accumulate into it.
+    // Each frame is 2 floats, so total floats = 2 * frames.
+    float *outBuf = (float *) stream;
+    for (int i = 0; i < 2 * frames; ++i) {
+        outBuf[i] = 0.0f;
+    }
+
+    // Precompute the listener center.
+    float listenerCx = audio->playerRect->x + audio->playerRect->w * 0.5f;
+
+    // For each synth voice, mix into the stereo buffer.
+    for (int v = 0; v < NUM_SYNTH_VOICES; v++) {
+        SynthVoice *voice = &audio->synthVoices[v];
+        if (voice->volume == 0 || voice->frequency == 0) {
+            continue; // skip silent or inactive voices
+        }
+
+        // Compute the source center X.
+        float sourceCx = voice->sourceRect.x + voice->sourceRect.w * 0.5f;
+        float dx = sourceCx - listenerCx;
+
+        // Normalize for pan. If |dx| >= maxPanDistance, pan is full left or full right.
+        float pan = dx / audio->maxPanDistance;
+        if (pan < -1.0f) pan = -1.0f;
+        if (pan > +1.0f) pan = +1.0f;
+
+        float gainL, gainR;
+        compute_stereo_gains(pan, &gainL, &gainR);
+
+        // Optional: you could also attenuate overall volume with distance:
+        //   float dist = fabsf(dx);
+        //   float distanceAtten = 1.0f - fminf(dist / audio->maxPanDistance, 1.0f);
+        //   float finalVolume = (voice->volume / 255.0f) * distanceAtten;
+        // For now, we just use voice->volume for the amplitude.
+        float amp = (voice->volume / 255.0f);
+
+        // Phase increment per sample frame:
+        // (freq * 256) / SAMPLE_RATE is the number of phase steps per mono sample.
+        // Because we write stereo, we still advance the phase once per frame.
+        uint8_t phaseInc = (uint8_t) ((voice->frequency * 256) / SAMPLE_RATE);
+
+        // Mix into each frame.
+        for (int i = 0; i < frames; i++) {
             float t = (float) voice->phase / 255.0f * 2.0f - 1.0f;
             float sample;
             switch (voice->waveform) {
                 default:
                 case WAVE_SINE:
@@ -33,18 +105,27 @@ void audio_callback(void *userdata, Uint8 *stream, int len) {
                     sample = t;
                     break;
                 case WAVE_TRIANGLE:
-                    sample = (t < 0) ? -t : t;
+                    sample = (t < 0.0f) ? -t : t;
                     break;
                 case WAVE_NOISE:
                     sample = ((float) rand() / RAND_MAX) * 2.0f - 1.0f;
                     break;
             }
 
-            voice->phase += (uint8_t) ((voice->frequency * 256) / SAMPLE_RATE);
-            mix += sample * (voice->volume / 255.0f);
-            activeVoices++;
-        }
-
-        ((float *) stream)[i] = (activeVoices > 0) ? mix / activeVoices : 0.0f;
+            voice->phase += phaseInc;
+
+            // Interleaved index: left = 2*i, right = 2*i + 1.
+            int idxL = 2 * i;
+            int idxR = 2 * i + 1;
+
+            // Accumulate into the buffer.
+            outBuf[idxL] += sample * amp * gainL;
+            outBuf[idxR] += sample * amp * gainR;
+        }
     }
+
+    // Note: we did not normalize by the number of active voices here, because each
+    // voice already applies its own volume. If you still want an automatic "divide
+    // by N active voices", you would need to track active voices per frame, which is
+    // relatively expensive. In practice, manage the volume per voice so clipping
+    // does not occur.
 }
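
A note on the unchanged context at the top of the first hunk: getAvailableChannel() is declared to return uint16_t but returns -1 when every voice is busy, which wraps to 65535; a call-site check like `channel == -1` then never matches, because the uint16_t value promotes to int 65535 before the comparison. A minimal fix, not part of this commit, is a signed return type:

// Hypothetical fix (not in this commit): a signed return type keeps the
// "no free voice" sentinel meaningful at call sites.
int getAvailableChannel(void) {
    for (uint16_t i = 0; i < NUM_SYNTH_VOICES; i++) {
        if (audioData.synthVoices[i].volume == 0) {
            return (int) i; // index of a free voice
        }
    }
    return -1;              // no voice free
}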
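The comments inside compute_stereo_gains already sketch the constant-power alternative; written out as a drop-in replacement it would look roughly like this (a sketch: the function name is ours, and the clamp mirrors the linear version):

#include <math.h>

// Constant-power panning: cos/sin gains satisfy L*L + R*R == 1, so perceived
// loudness stays steady across the sweep; at center both gains are ~0.7071
// instead of the 1.0 the linear panner produces, so no extra scaling is needed.
static void compute_stereo_gains_constpower(float pan, float *outL, float *outR) {
    pan = fmaxf(-1.0f, fminf(+1.0f, pan));
    float angle = (pan + 1.0f) * 0.78539816f; // pi/4 per unit: [-1,+1] -> [0, pi/2]
    *outL = cosf(angle);
    *outR = sinf(angle);
}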
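One consequence of the 8-bit phase math: (voice->frequency * 256) / SAMPLE_RATE is integer-truncated into a uint8_t, so pitch is quantized to multiples of SAMPLE_RATE / 256 (roughly 172 Hz if SAMPLE_RATE is 44100, an assumption here), and any frequency below one step truncates to a phase increment of zero, i.e. silence. A wider fixed-point accumulator removes this; a sketch, which would require widening SynthVoice.phase to uint32_t (not part of this commit):

#include <stdint.h>

// 16.16 fixed point: the high bits still count the 256 waveform steps per
// cycle, the low 16 bits carry the fractional remainder between frames.
static uint32_t phase_increment_fixed(float frequency, float sampleRate) {
    return (uint32_t) (frequency * 256.0f / sampleRate * 65536.0f);
}

// Per frame, assuming voice->phase is now uint32_t:
//   voice->phase += phase_increment_fixed(voice->frequency, SAMPLE_RATE);
//   uint8_t step = (uint8_t) (voice->phase >> 16);   // current 0..255 position
//   float t = (float) step / 255.0f * 2.0f - 1.0f;   // same mapping as the callback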
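Finally, the callback's contract: len is divided by 2 * sizeof(float), so the device must be opened for interleaved 32-bit float stereo. The actual setup lives elsewhere in the repo; for reference, a minimal SDL2 open that matches this callback might look like:

#include <SDL2/SDL.h>

// Hypothetical helper (not in this commit) showing the spec the callback assumes.
static SDL_AudioDeviceID open_stereo_device(void) {
    SDL_AudioSpec want = {0}, have;
    want.freq     = SAMPLE_RATE;    // must match the rate used for phaseInc
    want.format   = AUDIO_F32SYS;   // 32-bit float samples, native byte order
    want.channels = 2;              // interleaved stereo: L, R, L, R, ...
    want.samples  = 512;            // buffer size in sample frames
    want.callback = audio_callback;
    want.userdata = &audioData;
    // allowed_changes = 0: SDL converts behind the scenes if the hardware
    // differs, so the callback always sees exactly the format requested.
    SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &want, &have, 0);
    if (dev) SDL_PauseAudioDevice(dev, 0); // unpause to start playback
    return dev;
}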