Diffstat (limited to 'base/audio.c')
-rw-r--r--  base/audio.c  458
1 files changed, 458 insertions, 0 deletions
diff --git a/base/audio.c b/base/audio.c
new file mode 100644
index 0000000..1d385d5
--- /dev/null
+++ b/base/audio.c
@@ -0,0 +1,458 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <math.h>
+#include <stdlib.h>
+
+#ifndef M_PI /* M_PI is POSIX, not ISO C; provide it for strict compilers */
+#define M_PI 3.14159265358979323846
+#endif
+
+#define SAMPLE_RATE 48000
+#define NUM_CHANNELS 2
+#define FRAME_SIZE (NUM_CHANNELS * sizeof(short))
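+
+/*
+ * NOTE: `state` (filter_override / filter_frequency) and audio_callback() are
+ * not declared in this file; they are assumed to be provided elsewhere in the
+ * codebase (e.g. by the translation unit that includes this file).
+ */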
+
+// static inline float smoothstep(float edge0, float edge1, float x) {
+// x = (x - edge0) / (edge1 - edge0); // Scale x to [0, 1]
+// x = x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x); // Clamp to [0, 1]
+// return x * x * (3.0f - 2.0f * x); // Smooth interpolation
+// }
+
+// static inline float smootherstep(float edge0, float edge1, float x) {
+// x = (x - edge0) / (edge1 - edge0); // Scale x to [0, 1]
+// x = x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x); // Clamp to [0, 1]
+// return x * x * x * (x * (x * 6 - 15) + 10); // Modified curve
+// }
+
+static inline float fast_cos(float x) {
+	float x2 = x * x;
+	return 1.0f - x2 * (0.5f - x2 * 0.04166667f); // 4th-order Taylor series for cos(x); accurate near 0, but cos(pi) comes out ~0.12 instead of -1
+}
+
+static inline float cosine_smooth(float edge0, float edge1, float x) {
+ x = (x - edge0) / (edge1 - edge0); // Scale x to [0, 1]
+ x = x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x); // Clamp to [0, 1]
+	return 0.5f * (1.0f - fast_cos(x * M_PI)); // Raised-cosine smoothing; fast_cos's error at pi means this peaks near 0.44, not 1.0
+}
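+
+/*
+ * For reference, a libm-exact variant of cosine_smooth() (a sketch, unused
+ * like the alternatives above; <math.h> is already included). fast_cos()
+ * trades accuracy near pi for speed, so the two diverge at the top of the ramp.
+ */
+// static inline float cosine_smooth_exact(float edge0, float edge1, float x) {
+// 	x = (x - edge0) / (edge1 - edge0);            // Scale x to [0, 1]
+// 	x = x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x);  // Clamp to [0, 1]
+// 	return 0.5f * (1.0f - cosf(x * (float)M_PI)); // Exact raised cosine
+// }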
+
+static float filter_phase = 0.0f;
+static float prev_output_sample_L = 0.0f;
+static float prev_output_sample_R = 0.0f;
+
+static void audio_callback_thread(int16_t *audio_buffer, size_t frames) {
+	int filter_override = state.filter_override;     // -1 = auto (gated by the squarewave below), 0 = off, 1 = always on
+	float filter_frequency = state.filter_frequency; // Toggle frequency in Hz for the auto mode
+
+ audio_callback(audio_buffer, frames);
+
+ if(filter_override) {
+		float a = (float)M_PI * 4000.0f / (SAMPLE_RATE + (float)M_PI * 4000.0f); // One-pole low-pass coefficient, a = w / (w + SAMPLE_RATE) with w = pi * 4000; see the sketch below this function
+ float phase_increment = filter_frequency / SAMPLE_RATE;
+
+		for(size_t i = 0; i < frames * NUM_CHANNELS; i += NUM_CHANNELS) { // Interleaved stereo samples
+ float led_filter_active;
+
+ if(filter_override == -1) {
+ filter_phase += phase_increment;
+ if(filter_phase >= 1.0f) filter_phase -= 1.0f;
+
+ led_filter_active = cosine_smooth(0.45f, 0.50f, filter_phase) - cosine_smooth(0.95f, 1.00f, filter_phase);
+
+ } else {
+ led_filter_active = 1.0f; // Manual override (1 = on)
+ }
+
+ float input_sample_L = (float)audio_buffer[i] / 32767.0f;
+ float input_sample_R = (float)audio_buffer[i + 1] / 32767.0f;
+
+ float filtered_sample_L = a * input_sample_L + (1.0f - a) * prev_output_sample_L;
+ float filtered_sample_R = a * input_sample_R + (1.0f - a) * prev_output_sample_R;
+
+ prev_output_sample_L = filtered_sample_L;
+ prev_output_sample_R = filtered_sample_R;
+
+ audio_buffer[i] = (int16_t)((1.0f - led_filter_active) * input_sample_L * 32767.0f + led_filter_active * filtered_sample_L * 32767.0f);
+ audio_buffer[i + 1] = (int16_t)((1.0f - led_filter_active) * input_sample_R * 32767.0f + led_filter_active * filtered_sample_R * 32767.0f);
+ }
+ }
+}
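+
+/*
+ * General form of the low-pass coefficient used above (a sketch, not called
+ * anywhere; note the code above uses w = pi * 4000 rather than the textbook
+ * w = 2 * pi * fc, so in this form its effective cutoff is ~2kHz):
+ */
+// static inline float onepole_coeff(float cutoff_hz, float sample_rate) {
+// 	float w = 2.0f * (float)M_PI * cutoff_hz; // Angular cutoff frequency (rad/s)
+// 	return w / (sample_rate + w);             // a = w*dt / (1 + w*dt), dt = 1/sample_rate
+// }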
+
+#ifdef __linux__
+
+
+#include <pipewire/pipewire.h>
+#include <spa/param/audio/format-utils.h>
+#include <spa/param/props.h>
+
+#define BUFFER_SIZE (512 * FRAME_SIZE)
+
+static struct pw_thread_loop *pa_thread_loop;
+static struct pw_context *pa_context;
+static struct pw_core *pa_core;
+static struct pw_stream *pa_stream;
+static struct spa_hook pa_stream_listener;
+static uint64_t audio_clock_frequency;
+static uint64_t playback_cursor;
+
+/*
+ * Called from PipeWire's real-time thread whenever new audio data is needed.
+ * We dequeue a buffer, fill it via audio_callback_thread() (your
+ * audio_callback() plus the optional low-pass), and re-queue it.
+ */
+static void on_process(void *userdata) {
+ struct pw_buffer *buffer;
+ struct spa_buffer *spa_buf;
+ int16_t *data;
+ uint32_t size;
+ uint32_t frames;
+ struct pw_time time_info;
+
+ buffer = pw_stream_dequeue_buffer(pa_stream);
+ if(!buffer) {
+ /* No buffer available, skip. */
+ return;
+ }
+
+ spa_buf = buffer->buffer;
+ if(!spa_buf->datas || !spa_buf->datas[0].data) {
+ pw_stream_queue_buffer(pa_stream, buffer);
+ return;
+ }
+
+	data = spa_buf->datas[0].data;
+	size = spa_buf->datas[0].maxsize;
+	frames = size / FRAME_SIZE;
+	if(buffer->requested && buffer->requested < frames) {
+		frames = (uint32_t)buffer->requested; /* Honor the graph's requested quantum (PipeWire >= 0.3.49) */
+	}
+
+ // if(pw_stream_get_time_n(pa_stream, &time_info, sizeof(time_info)) == 0) {
+ // playback_cursor = time_info.now;
+ // }
+ // printf("Cursor(ns): %luns\n", playback_cursor);
+
+ audio_callback_thread(data, frames);
+
+ if(spa_buf->datas[0].chunk) {
+ spa_buf->datas[0].chunk->size = frames * FRAME_SIZE;
+ spa_buf->datas[0].chunk->stride = FRAME_SIZE;
+ }
+
+ pw_stream_queue_buffer(pa_stream, buffer);
+}
+
+/*
+ * Initialize PipeWire, create the stream, and connect for audio playback.
+ * Returns immediately so your main thread can continue.
+ */
+int audio_initialize(void) {
+ pw_init(0, 0);
+
+ pa_thread_loop = pw_thread_loop_new("my-audio-loop", 0);
+ if(pa_thread_loop) {
+ if(pw_thread_loop_start(pa_thread_loop) == 0) {
+ pw_thread_loop_lock(pa_thread_loop);
+
+ pa_context = pw_context_new(pw_thread_loop_get_loop(pa_thread_loop), 0, 0);
+ if(pa_context) {
+ pa_core = pw_context_connect(pa_context, 0, 0);
+ if(pa_core){
+ static const struct spa_dict_item items[] = {
+ SPA_DICT_ITEM_INIT(PW_KEY_MEDIA_TYPE, "Audio"),
+ SPA_DICT_ITEM_INIT(PW_KEY_MEDIA_CATEGORY, "Playback"),
+ SPA_DICT_ITEM_INIT(PW_KEY_MEDIA_ROLE, "Game"),
+ SPA_DICT_ITEM_INIT(PW_KEY_NODE_LATENCY, "512/48000")
+ };
+					struct pw_properties *props = pw_properties_new_dict(&SPA_DICT_INIT(items, 4));
+					/* pw_stream_new() takes ownership of props, so it must not be freed here. */
+
+ pa_stream = pw_stream_new(pa_core, "My Audio Stream", props);
+ if(pa_stream) {
+ static struct pw_stream_events stream_events = { PW_VERSION_STREAM_EVENTS, .process = on_process, };
+ pw_stream_add_listener(pa_stream, &pa_stream_listener, &stream_events, 0);
+
+ /*
+ * Build two SPA params:
+ * 1) The audio format: S16_LE, SAMPLE_RATE, NUM_CHANNELS
+ * 2) The buffer param: request BUFFER_SIZE bytes per buffer
+ */
+ uint8_t fmt_buffer[1024];
+ struct spa_pod_builder fmt_builder = SPA_POD_BUILDER_INIT(fmt_buffer, sizeof(fmt_buffer));
+ const struct spa_pod *fmt_param = spa_pod_builder_add_object(
+ &fmt_builder,
+ SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+ SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_audio),
+ SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+ SPA_FORMAT_AUDIO_format, SPA_POD_Id(SPA_AUDIO_FORMAT_S16_LE),
+ SPA_FORMAT_AUDIO_rate, SPA_POD_Int(SAMPLE_RATE),
+ SPA_FORMAT_AUDIO_channels, SPA_POD_Int(NUM_CHANNELS)
+ );
+
+ uint8_t buf_buffer[1024];
+ struct spa_pod_builder buf_builder = SPA_POD_BUILDER_INIT(buf_buffer, sizeof(buf_buffer));
+ struct spa_pod *buf_param = spa_pod_builder_add_object(
+ &buf_builder,
+ SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+							SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, 16), /* Default 8 buffers; allow the server to negotiate between 2 and 16. */
+ SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
+ SPA_PARAM_BUFFERS_size, SPA_POD_CHOICE_RANGE_Int(BUFFER_SIZE, BUFFER_SIZE, BUFFER_SIZE*8),
+ SPA_PARAM_BUFFERS_stride, SPA_POD_Int(FRAME_SIZE),
+ SPA_PARAM_BUFFERS_align, SPA_POD_Int(16)
+ );
+
+ const struct spa_pod *params[2];
+ params[0] = fmt_param;
+ params[1] = buf_param;
+
+						int res = pw_stream_connect(pa_stream, PW_DIRECTION_OUTPUT, PW_ID_ANY, PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_RT_PROCESS | PW_STREAM_FLAG_MAP_BUFFERS, params, 2);
+						if(res == 0) {
+							pw_thread_loop_unlock(pa_thread_loop);
+							return 0;
+						}
+						fprintf(stderr, "Failed to connect PipeWire stream: %d\n", res);
+						pw_stream_destroy(pa_stream);
+						pa_stream = 0;
+
+					} else {
+						fprintf(stderr, "Failed to create PipeWire stream\n");
+					}
+ pw_core_disconnect(pa_core);
+ } else {
+ fprintf(stderr, "Failed to connect context to core\n");
+ }
+ pw_context_destroy(pa_context);
+ } else {
+ fprintf(stderr, "Failed to create PipeWire context\n");
+ }
+ pw_thread_loop_unlock(pa_thread_loop);
+ pw_thread_loop_stop(pa_thread_loop);
+ } else {
+ fprintf(stderr, "Failed to start PipeWire thread loop\n");
+ }
+ pw_thread_loop_destroy(pa_thread_loop);
+ } else {
+ fprintf(stderr, "Failed to create PipeWire thread loop\n");
+ }
+ pw_deinit();
+ return -1;
+}
+
+/*
+ * Clean up PipeWire objects, stop the thread loop, and deinit.
+ * This should be called before your program exits.
+ */
+void audio_shutdown(void) {
+ if(!pa_thread_loop) {
+ return;
+ }
+
+ pw_thread_loop_lock(pa_thread_loop);
+
+ if(pa_stream){
+ pw_stream_disconnect(pa_stream);
+ pw_stream_destroy(pa_stream);
+ }
+
+ if(pa_core){
+ pw_core_disconnect(pa_core);
+ }
+
+ if(pa_context){
+ pw_context_destroy(pa_context);
+ }
+
+ pw_thread_loop_unlock(pa_thread_loop);
+ pw_thread_loop_stop(pa_thread_loop);
+ pw_thread_loop_destroy(pa_thread_loop);
+ pw_deinit();
+}
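+
+/*
+ * Typical usage from the main thread (a sketch; the surrounding main loop is
+ * hypothetical, and audio_callback() is assumed to be defined elsewhere):
+ *
+ *     if(audio_initialize() != 0) {
+ *         fprintf(stderr, "Audio unavailable, continuing without sound\n");
+ *     }
+ *     // ... run the main loop; on_process() pulls samples in the background ...
+ *     audio_shutdown();
+ */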
+
+#elif defined(_WIN32)
+
+#define COBJMACROS
+#include <windows.h>
+#include <initguid.h>
+#include <audioclient.h>
+#include <mmdeviceapi.h>
+#include <avrt.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <timeapi.h>
+
+/*
+ * Minimal WASAPI shared-mode audio playback with explicit 48kHz/16-bit/2ch.
+ * (SAMPLE_RATE, NUM_CHANNELS, and FRAME_SIZE are defined at the top of this file.)
+ */
+
+static IMMDeviceEnumerator *enumerator;
+static IMMDevice *device_out;
+static IAudioClient *audio_client_out;
+static IAudioRenderClient *render_client;
+static HANDLE audio_event;
+static HANDLE audio_thread;
+static int running;
+
+static DWORD WINAPI audio_thread_proc(void *arg) {
+	UINT32 buffer_size;
+	UINT32 padding;
+	UINT32 available;
+	uint8_t *data;
+	(void)arg;
+
+	IAudioClient_GetBufferSize(audio_client_out, &buffer_size);
+
+	while(running) {
+		WaitForSingleObject(audio_event, INFINITE);
+		if(!running) {
+			break;
+		}
+
+		IAudioClient_GetCurrentPadding(audio_client_out, &padding);
+		available = buffer_size - padding;
+		if(available == 0) {
+			continue;
+		}
+		if(FAILED(IAudioRenderClient_GetBuffer(render_client, available, &data))) {
+			continue;
+		}
+		audio_callback_thread((int16_t*)data, available); /* 'available' is in frames */
+		IAudioRenderClient_ReleaseBuffer(render_client, available, 0);
+	}
+	return 0;
+}
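+
+/*
+ * avrt.h is included above but not yet used; presumably the plan is MMCSS
+ * registration so the scheduler prioritizes this thread. A minimal sketch
+ * (link against avrt.lib; "Pro Audio" is a standard MMCSS task name):
+ *
+ *     DWORD task_index = 0;
+ *     HANDLE mmcss = AvSetMmThreadCharacteristicsA("Pro Audio", &task_index);
+ *     // ... render loop ...
+ *     if(mmcss) AvRevertMmThreadCharacteristics(mmcss);
+ */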
+
+int audio_initialize(void) {
+ WAVEFORMATEX wf;
+ REFERENCE_TIME dur_out;
+
+ CoInitializeEx(0, COINIT_MULTITHREADED);
+ if(SUCCEEDED(CoCreateInstance(&CLSID_MMDeviceEnumerator, 0, CLSCTX_ALL, &IID_IMMDeviceEnumerator, (void**)&enumerator))) {
+ if(SUCCEEDED(IMMDeviceEnumerator_GetDefaultAudioEndpoint(enumerator, eRender, eConsole, &device_out))) {
+ if(SUCCEEDED(IMMDevice_Activate(device_out, &IID_IAudioClient, CLSCTX_ALL, 0, (void**)&audio_client_out))) {
+ wf.wFormatTag = WAVE_FORMAT_PCM;
+ wf.nChannels = NUM_CHANNELS;
+				wf.nSamplesPerSec = SAMPLE_RATE;
+ wf.wBitsPerSample = 16;
+ wf.nBlockAlign = (wf.nChannels * wf.wBitsPerSample) / 8;
+ wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign;
+ wf.cbSize = 0;
+
+				IAudioClient_GetDevicePeriod(audio_client_out, &dur_out, 0);
+				/* AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM (Windows 8.1+) lets shared mode accept our
+				 * explicit 48kHz/16-bit format even when the engine's mix format differs. */
+				if(SUCCEEDED(IAudioClient_Initialize(audio_client_out, AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY, dur_out, 0, &wf, 0))) {
+					audio_event = CreateEvent(0, FALSE, FALSE, 0);
+					if(audio_event){
+						IAudioClient_SetEventHandle(audio_client_out, audio_event);
+						IAudioClient_GetService(audio_client_out, &IID_IAudioRenderClient, (void**)&render_client);
+						IAudioClient_Start(audio_client_out);
+
+						running = 1;
+						audio_thread = CreateThread(0, 0, audio_thread_proc, 0, 0, 0);
+						return 0;
+					} else {
+						printf("Failed to create audio event\n");
+					}
+				} else {
+					printf("Failed to initialize audio client\n");
+				}
+				IAudioClient_Release(audio_client_out);
+				audio_client_out = 0;
+			} else {
+				printf("Failed to activate audio client\n");
+			}
+			IMMDevice_Release(device_out);
+			device_out = 0;
+		} else {
+			printf("Failed to get default audio endpoint\n");
+		}
+		IMMDeviceEnumerator_Release(enumerator);
+		enumerator = 0;
+	} else {
+		printf("Failed to create MMDeviceEnumerator\n");
+	}
+	CoUninitialize();
+	return -1;
+}
+
+void audio_shutdown(void) {
+	running = 0;
+	if(audio_thread) {
+		SetEvent(audio_event);
+		WaitForSingleObject(audio_thread, INFINITE);
+		CloseHandle(audio_thread);
+	}
+	if(audio_event) {
+		CloseHandle(audio_event);
+	}
+	if(audio_client_out) {
+		IAudioClient_Stop(audio_client_out);
+		IAudioClient_Release(audio_client_out);
+	}
+	if(render_client) {
+		IAudioRenderClient_Release(render_client);
+	}
+	if(device_out) {
+		IMMDevice_Release(device_out);
+	}
+	if(enumerator) {
+		IMMDeviceEnumerator_Release(enumerator);
+	}
+	CoUninitialize();
+}
+
+#endif
+
+// BELOW IS FOR FUTURE FRAME SYNCHRONIZATION!!!
+
+#if 0
+// Audio sync throttling logic (using audio playback clock)
+
+#define AUDIO_SAMPLE_RATE 48000
+#define FRAMETIME (1000000000 / 60) // ~16.67ms per frame at 60Hz (NES NTSC is ~60.0988Hz; adjust for PAL/other)
+
+static uint64_t emulator_start_time_ns = 0;
+static uint64_t audio_start_time_ns = 0;
+
+// Stub: return current audio playback time in nanoseconds
+uint64_t get_audio_playback_time_ns(void);
+
+// Call this once at emulation start
+void audio_sync_init(uint64_t current_time_ns) {
+ emulator_start_time_ns = current_time_ns;
+ audio_start_time_ns = get_audio_playback_time_ns();
+}
+
+// Call this at the end of each frame
+void audio_throttle_emulator(uint64_t frame_number, int64_t *frame_duration_ns) {
+ uint64_t expected_emulated_time = frame_number * FRAMETIME;
+ uint64_t actual_audio_time = get_audio_playback_time_ns() - audio_start_time_ns;
+
+ int64_t drift = (int64_t)(actual_audio_time) - (int64_t)(expected_emulated_time);
+
+ // Adjust frame duration to correct drift gradually
+ *frame_duration_ns -= drift / 8;
+	// Clamp the adjusted duration to within +/-50us of FRAMETIME to avoid jitter
+ if(*frame_duration_ns > FRAMETIME + 50000) {
+ *frame_duration_ns = FRAMETIME + 50000;
+ } else if(*frame_duration_ns < FRAMETIME - 50000) {
+ *frame_duration_ns = FRAMETIME - 50000;
+ }
+}
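+
+/*
+ * How the throttle is meant to be driven (a sketch; run_one_frame(), now_ns()
+ * and sleep_ns() are hypothetical helpers, not part of this file):
+ *
+ *     audio_sync_init(now_ns());
+ *     for(uint64_t frame = 1;; frame++) {
+ *         run_one_frame();
+ *         int64_t duration = FRAMETIME;
+ *         audio_throttle_emulator(frame, &duration);
+ *         sleep_ns(duration);
+ *     }
+ */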
+
+#ifdef _WIN32
+#include <windows.h>
+#include <mmdeviceapi.h>
+#include <audioclient.h>
+
+uint64_t get_audio_playback_time_ns(void) {
+	// WASAPI: query the IAudioClock interface. GetPosition() reports ticks in
+	// the clock's own frequency units (IAudioClock::GetFrequency), which is
+	// not necessarily the sample rate, so convert via the reported frequency.
+	// This is just a placeholder. You'll need to cache IAudioClock *audio_clock externally.
+	extern IAudioClock *audio_clock;
+	UINT64 freq, pos;
+	IAudioClock_GetFrequency(audio_clock, &freq);
+	IAudioClock_GetPosition(audio_clock, &pos, 0);
+	return (pos * 1000000000ULL) / freq;
+}
+
+#else
+// PipeWire backend: there is no public spa_clock object to query; use the
+// stream's timing info instead (the same pw_stream_get_time_n() API as the
+// commented-out cursor code in on_process above).
+extern struct pw_stream *audio_stream; // placeholder: cache the pw_stream externally
+
+uint64_t get_audio_playback_time_ns(void) {
+	struct pw_time t;
+	if(pw_stream_get_time_n(audio_stream, &t, sizeof(t)) == 0) {
+		return (uint64_t)t.now;
+	}
+	return 0;
+}
+#endif
+
+#endif