diff --git a/CMakeLists.txt b/CMakeLists.txt index a26aa7c..dc2a63d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,6 +57,7 @@ include_directories(${CMAKE_SOURCE_DIR}/src/3d) include_directories(${CMAKE_SOURCE_DIR}/src/platform) include_directories(${CMAKE_SOURCE_DIR}/src/tiled) include_directories(${CMAKE_SOURCE_DIR}/src/ldtk) +include_directories(${CMAKE_SOURCE_DIR}/src/audio) include_directories(${CMAKE_SOURCE_DIR}/modules/RapidXML) include_directories(${CMAKE_SOURCE_DIR}/modules/json/single_include) diff --git a/src/McRFPy_API.cpp b/src/McRFPy_API.cpp index d1e8d80..62c731a 100644 --- a/src/McRFPy_API.cpp +++ b/src/McRFPy_API.cpp @@ -17,6 +17,7 @@ #include "PyMouseButton.h" #include "PyInputState.h" #include "PySound.h" +#include "PySoundBuffer.h" #include "PyMusic.h" #include "PyKeyboard.h" #include "PyMouse.h" @@ -467,6 +468,7 @@ PyObject* PyInit_mcrfpy() /*audio (#66)*/ &PySoundType, + &PySoundBufferType, &PyMusicType, /*keyboard state (#160)*/ diff --git a/src/PySound.cpp b/src/PySound.cpp index 536fa52..f6e2889 100644 --- a/src/PySound.cpp +++ b/src/PySound.cpp @@ -1,7 +1,9 @@ #include "PySound.h" +#include "PySoundBuffer.h" #include "McRFPy_API.h" #include "McRFPy_Doc.h" #include +#include PySound::PySound(const std::string& filename) : source(filename), loaded(false) @@ -12,6 +14,16 @@ PySound::PySound(const std::string& filename) } } +PySound::PySound(std::shared_ptr bufData) + : source(""), loaded(false), bufferData(bufData) +{ + if (bufData && !bufData->samples.empty()) { + buffer = bufData->getSfBuffer(); + sound.setBuffer(buffer); + loaded = true; + } +} + void PySound::play() { if (loaded) { @@ -64,6 +76,16 @@ float PySound::getDuration() const return buffer.getDuration().asSeconds(); } +float PySound::getPitch() const +{ + return sound.getPitch(); +} + +void PySound::setPitch(float pitch) +{ + sound.setPitch(std::max(0.01f, pitch)); +} + PyObject* PySound::pyObject() { auto type = 
(PyTypeObject*)PyObject_GetAttrString(McRFPy_API::mcrf_module, "Sound"); @@ -102,17 +124,40 @@ Py_hash_t PySound::hash(PyObject* obj) int PySound::init(PySoundObject* self, PyObject* args, PyObject* kwds) { - static const char* keywords[] = {"filename", nullptr}; - const char* filename = nullptr; + // Accept either a string filename or a SoundBuffer object + PyObject* source_obj = nullptr; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "s", const_cast(keywords), &filename)) { + if (!PyArg_ParseTuple(args, "O", &source_obj)) { return -1; } - self->data = std::make_shared(filename); + if (PyUnicode_Check(source_obj)) { + // String filename path + const char* filename = PyUnicode_AsUTF8(source_obj); + if (!filename) return -1; - if (!self->data->loaded) { - PyErr_Format(PyExc_RuntimeError, "Failed to load sound file: %s", filename); + self->data = std::make_shared(filename); + + if (!self->data->loaded) { + PyErr_Format(PyExc_RuntimeError, "Failed to load sound file: %s", filename); + return -1; + } + } else if (PyObject_IsInstance(source_obj, (PyObject*)&mcrfpydef::PySoundBufferType)) { + // SoundBuffer object + auto* sbObj = (PySoundBufferObject*)source_obj; + if (!sbObj->data || sbObj->data->samples.empty()) { + PyErr_SetString(PyExc_RuntimeError, "SoundBuffer is empty or invalid"); + return -1; + } + + self->data = std::make_shared(sbObj->data); + + if (!self->data->loaded) { + PyErr_SetString(PyExc_RuntimeError, "Failed to create sound from SoundBuffer"); + return -1; + } + } else { + PyErr_SetString(PyExc_TypeError, "Sound() argument must be a filename string or SoundBuffer"); return -1; } @@ -155,6 +200,43 @@ PyObject* PySound::py_stop(PySoundObject* self, PyObject* args) Py_RETURN_NONE; } +PyObject* PySound::py_play_varied(PySoundObject* self, PyObject* args, PyObject* kwds) +{ + static const char* keywords[] = {"pitch_range", "volume_range", nullptr}; + double pitch_range = 0.1; + double volume_range = 3.0; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, 
"|dd", const_cast(keywords), + &pitch_range, &volume_range)) { + return NULL; + } + if (!self->data) { + PyErr_SetString(PyExc_RuntimeError, "Sound object is invalid"); + return NULL; + } + + // Save original values + float origPitch = self->data->getPitch(); + float origVolume = self->data->getVolume(); + + // Randomize + static std::mt19937 rng(std::random_device{}()); + std::uniform_real_distribution pitchDist(-pitch_range, pitch_range); + std::uniform_real_distribution volDist(-volume_range, volume_range); + + self->data->setPitch(std::max(0.01f, origPitch + static_cast(pitchDist(rng)))); + self->data->setVolume(std::max(0.0f, std::min(100.0f, origVolume + static_cast(volDist(rng))))); + + self->data->play(); + + // Restore originals (SFML will use the values set at play time) + // Note: we restore after play() so the variation applies only to this instance + self->data->setPitch(origPitch); + self->data->setVolume(origVolume); + + Py_RETURN_NONE; +} + // Property getters/setters PyObject* PySound::get_volume(PySoundObject* self, void* closure) { @@ -225,6 +307,42 @@ PyObject* PySound::get_source(PySoundObject* self, void* closure) return PyUnicode_FromString(self->data->source.c_str()); } +PyObject* PySound::get_pitch(PySoundObject* self, void* closure) +{ + if (!self->data) { + PyErr_SetString(PyExc_RuntimeError, "Sound object is invalid"); + return NULL; + } + return PyFloat_FromDouble(self->data->getPitch()); +} + +int PySound::set_pitch(PySoundObject* self, PyObject* value, void* closure) +{ + if (!self->data) { + PyErr_SetString(PyExc_RuntimeError, "Sound object is invalid"); + return -1; + } + float pitch = PyFloat_AsDouble(value); + if (PyErr_Occurred()) { + return -1; + } + self->data->setPitch(pitch); + return 0; +} + +PyObject* PySound::get_buffer(PySoundObject* self, void* closure) +{ + if (!self->data) { + PyErr_SetString(PyExc_RuntimeError, "Sound object is invalid"); + return NULL; + } + auto bufData = self->data->getBufferData(); + if (!bufData) 
{ + Py_RETURN_NONE; + } + return PySoundBuffer_from_data(bufData); +} + PyMethodDef PySound::methods[] = { {"play", (PyCFunction)PySound::py_play, METH_NOARGS, MCRF_METHOD(Sound, play, @@ -241,6 +359,14 @@ PyMethodDef PySound::methods[] = { MCRF_SIG("()", "None"), MCRF_DESC("Stop playing and reset to the beginning.") )}, + {"play_varied", (PyCFunction)PySound::py_play_varied, METH_VARARGS | METH_KEYWORDS, + MCRF_METHOD(Sound, play_varied, + MCRF_SIG("(pitch_range: float = 0.1, volume_range: float = 3.0)", "None"), + MCRF_DESC("Play with randomized pitch and volume for natural variation."), + MCRF_ARGS_START + MCRF_ARG("pitch_range", "Random pitch offset range (default 0.1)") + MCRF_ARG("volume_range", "Random volume offset range (default 3.0)") + )}, {NULL} }; @@ -255,5 +381,9 @@ PyGetSetDef PySound::getsetters[] = { MCRF_PROPERTY(duration, "Total duration of the sound in seconds (read-only)."), NULL}, {"source", (getter)PySound::get_source, NULL, MCRF_PROPERTY(source, "Filename path used to load this sound (read-only)."), NULL}, + {"pitch", (getter)PySound::get_pitch, (setter)PySound::set_pitch, + MCRF_PROPERTY(pitch, "Playback pitch multiplier (1.0 = normal, >1 = higher, <1 = lower)."), NULL}, + {"buffer", (getter)PySound::get_buffer, NULL, + MCRF_PROPERTY(buffer, "The SoundBuffer if created from one, else None (read-only)."), NULL}, {NULL} }; diff --git a/src/PySound.h b/src/PySound.h index b24a605..07d0da4 100644 --- a/src/PySound.h +++ b/src/PySound.h @@ -3,6 +3,7 @@ #include "Python.h" class PySound; +class SoundBufferData; typedef struct { PyObject_HEAD @@ -17,8 +18,12 @@ private: std::string source; bool loaded; + // SoundBuffer support: if created from a SoundBuffer, store reference + std::shared_ptr bufferData; + public: PySound(const std::string& filename); + PySound(std::shared_ptr bufData); // Playback control void play(); @@ -33,6 +38,13 @@ public: bool isPlaying() const; float getDuration() const; + // Pitch + float getPitch() const; + void 
setPitch(float pitch); + + // Buffer data access + std::shared_ptr getBufferData() const { return bufferData; } + // Python interface PyObject* pyObject(); static PyObject* repr(PyObject* obj); @@ -44,6 +56,7 @@ public: static PyObject* py_play(PySoundObject* self, PyObject* args); static PyObject* py_pause(PySoundObject* self, PyObject* args); static PyObject* py_stop(PySoundObject* self, PyObject* args); + static PyObject* py_play_varied(PySoundObject* self, PyObject* args, PyObject* kwds); // Python getters/setters static PyObject* get_volume(PySoundObject* self, void* closure); @@ -53,6 +66,9 @@ public: static PyObject* get_playing(PySoundObject* self, void* closure); static PyObject* get_duration(PySoundObject* self, void* closure); static PyObject* get_source(PySoundObject* self, void* closure); + static PyObject* get_pitch(PySoundObject* self, void* closure); + static int set_pitch(PySoundObject* self, PyObject* value, void* closure); + static PyObject* get_buffer(PySoundObject* self, void* closure); static PyMethodDef methods[]; static PyGetSetDef getsetters[]; @@ -67,7 +83,20 @@ namespace mcrfpydef { .tp_repr = PySound::repr, .tp_hash = PySound::hash, .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_doc = PyDoc_STR("Sound effect object for short audio clips"), + .tp_doc = PyDoc_STR( + "Sound(source)\n\n" + "Sound effect object for short audio clips.\n\n" + "Args:\n" + " source: Filename string or SoundBuffer object.\n\n" + "Properties:\n" + " volume (float): Volume 0-100.\n" + " loop (bool): Whether to loop.\n" + " playing (bool, read-only): True if playing.\n" + " duration (float, read-only): Duration in seconds.\n" + " source (str, read-only): Source filename.\n" + " pitch (float): Playback pitch (1.0 = normal).\n" + " buffer (SoundBuffer, read-only): The SoundBuffer, if created from one.\n" + ), .tp_methods = PySound::methods, .tp_getset = PySound::getsetters, .tp_init = (initproc)PySound::init, diff --git a/src/PySoundBuffer.cpp b/src/PySoundBuffer.cpp new file 
mode 100644 index 0000000..bff1673 --- /dev/null +++ b/src/PySoundBuffer.cpp @@ -0,0 +1,766 @@ +#include "PySoundBuffer.h" +#include "audio/SfxrSynth.h" +#include "audio/AudioEffects.h" +#include +#include +#include +#include + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +// Helper: create a Python SoundBuffer wrapping given data +PyObject* PySoundBuffer_from_data(std::shared_ptr data) { + auto* obj = (PySoundBufferObject*)mcrfpydef::PySoundBufferType.tp_alloc(&mcrfpydef::PySoundBufferType, 0); + if (obj) { + new (&obj->data) std::shared_ptr(std::move(data)); + } + return (PyObject*)obj; +} + +// ============================================================================ +// Type infrastructure +// ============================================================================ + +PyObject* PySoundBuffer::pynew(PyTypeObject* type, PyObject* args, PyObject* kwds) { + auto* self = (PySoundBufferObject*)type->tp_alloc(type, 0); + if (self) { + new (&self->data) std::shared_ptr(); + } + return (PyObject*)self; +} + +int PySoundBuffer::init(PySoundBufferObject* self, PyObject* args, PyObject* kwds) { + static const char* keywords[] = {"filename", nullptr}; + const char* filename = nullptr; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "s", const_cast(keywords), &filename)) { + return -1; + } + + // Load from file via sf::SoundBuffer + sf::SoundBuffer tmpBuf; + if (!tmpBuf.loadFromFile(filename)) { + PyErr_Format(PyExc_RuntimeError, "Failed to load sound file: %s", filename); + return -1; + } + + // Extract samples from the loaded buffer + auto data = std::make_shared(); + data->sampleRate = tmpBuf.getSampleRate(); + data->channels = tmpBuf.getChannelCount(); + + // Copy sample data from sf::SoundBuffer + auto count = tmpBuf.getSampleCount(); + if (count > 0) { + // SFML provides getSamples() on desktop; for headless/SDL2 we have no samples to copy + // On SFML desktop builds, sf::SoundBuffer has getSamples() +#if !defined(MCRF_HEADLESS) && 
!defined(MCRF_SDL2) + const sf::Int16* src = tmpBuf.getSamples(); + data->samples.assign(src, src + count); +#else + // Headless/SDL2: samples not directly accessible from sf::SoundBuffer + // Create silence of the appropriate duration + float dur = tmpBuf.getDuration().asSeconds(); + size_t numSamples = static_cast(dur * data->sampleRate * data->channels); + data->samples.resize(numSamples, 0); +#endif + } + + data->sfBufferDirty = true; + self->data = std::move(data); + return 0; +} + +PyObject* PySoundBuffer::repr(PyObject* obj) { + auto* self = (PySoundBufferObject*)obj; + std::ostringstream ss; + if (!self->data) { + ss << ""; + } else { + ss << ""; + } + std::string s = ss.str(); + return PyUnicode_FromString(s.c_str()); +} + +// ============================================================================ +// Properties +// ============================================================================ + +PyObject* PySoundBuffer::get_duration(PySoundBufferObject* self, void*) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + return PyFloat_FromDouble(self->data->duration()); +} + +PyObject* PySoundBuffer::get_sample_count(PySoundBufferObject* self, void*) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + return PyLong_FromSize_t(self->data->samples.size()); +} + +PyObject* PySoundBuffer::get_sample_rate(PySoundBufferObject* self, void*) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + return PyLong_FromUnsignedLong(self->data->sampleRate); +} + +PyObject* PySoundBuffer::get_channels(PySoundBufferObject* self, void*) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + return PyLong_FromUnsignedLong(self->data->channels); +} + +PyObject* PySoundBuffer::get_sfxr_params(PySoundBufferObject* self, void*) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid 
SoundBuffer"); return NULL; } + if (!self->data->sfxrParams) { + Py_RETURN_NONE; + } + return sfxr_params_to_dict(*self->data->sfxrParams); +} + +// ============================================================================ +// Class method: from_samples +// ============================================================================ + +PyObject* PySoundBuffer::from_samples(PyObject* cls, PyObject* args, PyObject* kwds) { + static const char* keywords[] = {"data", "channels", "sample_rate", nullptr}; + Py_buffer buf; + unsigned int ch = 1; + unsigned int rate = 44100; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "y*II", const_cast(keywords), + &buf, &ch, &rate)) { + return NULL; + } + + if (ch == 0 || rate == 0) { + PyBuffer_Release(&buf); + PyErr_SetString(PyExc_ValueError, "channels and sample_rate must be > 0"); + return NULL; + } + + size_t numSamples = buf.len / sizeof(int16_t); + auto data = std::make_shared(); + data->samples.resize(numSamples); + memcpy(data->samples.data(), buf.buf, numSamples * sizeof(int16_t)); + data->channels = ch; + data->sampleRate = rate; + data->sfBufferDirty = true; + + PyBuffer_Release(&buf); + return PySoundBuffer_from_data(std::move(data)); +} + +// ============================================================================ +// Class method: tone +// ============================================================================ + +PyObject* PySoundBuffer::tone(PyObject* cls, PyObject* args, PyObject* kwds) { + static const char* keywords[] = { + "frequency", "duration", "waveform", + "attack", "decay", "sustain", "release", + "sample_rate", nullptr + }; + double freq = 440.0; + double dur = 0.5; + const char* waveform = "sine"; + double attack = 0.01, decay_time = 0.0, sustain = 1.0, release = 0.01; + unsigned int rate = 44100; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "dd|sddddI", const_cast(keywords), + &freq, &dur, &waveform, &attack, &decay_time, + &sustain, &release, &rate)) { + return NULL; + } + + if (dur <= 
0.0 || freq <= 0.0) { + PyErr_SetString(PyExc_ValueError, "frequency and duration must be positive"); + return NULL; + } + + size_t totalSamples = static_cast(dur * rate); + std::vector samples(totalSamples); + + std::string wf(waveform); + std::mt19937 noiseRng(42); // Deterministic noise + + // Generate waveform + for (size_t i = 0; i < totalSamples; i++) { + double t = static_cast(i) / rate; + double phase = fmod(t * freq, 1.0); + double sample = 0.0; + + if (wf == "sine") { + sample = sin(2.0 * M_PI * phase); + } else if (wf == "square") { + // PolyBLEP square + double naive = phase < 0.5 ? 1.0 : -1.0; + double dt = freq / rate; + // PolyBLEP correction at transitions + auto polyblep = [](double t, double dt) -> double { + if (t < dt) { t /= dt; return t + t - t * t - 1.0; } + if (t > 1.0 - dt) { t = (t - 1.0) / dt; return t * t + t + t + 1.0; } + return 0.0; + }; + sample = naive + polyblep(phase, dt) - polyblep(fmod(phase + 0.5, 1.0), dt); + } else if (wf == "saw") { + // PolyBLEP saw + double naive = 2.0 * phase - 1.0; + double dt = freq / rate; + auto polyblep = [](double t, double dt) -> double { + if (t < dt) { t /= dt; return t + t - t * t - 1.0; } + if (t > 1.0 - dt) { t = (t - 1.0) / dt; return t * t + t + t + 1.0; } + return 0.0; + }; + sample = naive - polyblep(phase, dt); + } else if (wf == "triangle") { + sample = 4.0 * fabs(phase - 0.5) - 1.0; + } else if (wf == "noise") { + std::uniform_real_distribution dist(-1.0, 1.0); + sample = dist(noiseRng); + } else { + PyErr_Format(PyExc_ValueError, + "Unknown waveform '%s'. Use: sine, square, saw, triangle, noise", waveform); + return NULL; + } + + // ADSR envelope + double env = 1.0; + double noteEnd = dur - release; + if (t < attack) { + env = (attack > 0.0) ? t / attack : 1.0; + } else if (t < attack + decay_time) { + double decayProgress = (decay_time > 0.0) ? 
(t - attack) / decay_time : 1.0; + env = 1.0 - (1.0 - sustain) * decayProgress; + } else if (t < noteEnd) { + env = sustain; + } else { + double releaseProgress = (release > 0.0) ? (t - noteEnd) / release : 1.0; + env = sustain * (1.0 - std::min(releaseProgress, 1.0)); + } + + sample *= env; + sample = std::max(-1.0, std::min(1.0, sample)); + samples[i] = static_cast(sample * 32000.0); + } + + auto data = std::make_shared(std::move(samples), rate, 1); + return PySoundBuffer_from_data(std::move(data)); +} + +// ============================================================================ +// Class method: sfxr +// ============================================================================ + +PyObject* PySoundBuffer::sfxr(PyObject* cls, PyObject* args, PyObject* kwds) { + // Accept either: sfxr("preset") or sfxr(wave_type=0, base_freq=0.3, ...) + static const char* keywords[] = { + "preset", "seed", + "wave_type", "base_freq", "freq_limit", "freq_ramp", "freq_dramp", + "duty", "duty_ramp", + "vib_strength", "vib_speed", + "env_attack", "env_sustain", "env_decay", "env_punch", + "lpf_freq", "lpf_ramp", "lpf_resonance", + "hpf_freq", "hpf_ramp", + "pha_offset", "pha_ramp", + "repeat_speed", + "arp_speed", "arp_mod", + nullptr + }; + + const char* preset = nullptr; + PyObject* seed_obj = Py_None; + + // sfxr params - initialized to -999 as sentinel (unset) + int wave_type = -999; + double base_freq = -999, freq_limit = -999, freq_ramp = -999, freq_dramp = -999; + double duty = -999, duty_ramp = -999; + double vib_strength = -999, vib_speed = -999; + double env_attack = -999, env_sustain = -999, env_decay = -999, env_punch = -999; + double lpf_freq = -999, lpf_ramp = -999, lpf_resonance = -999; + double hpf_freq = -999, hpf_ramp = -999; + double pha_offset = -999, pha_ramp = -999; + double repeat_speed = -999; + double arp_speed = -999, arp_mod = -999; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|zOidddddddddddddddddddddd", + const_cast(keywords), + &preset, 
&seed_obj, + &wave_type, &base_freq, &freq_limit, &freq_ramp, &freq_dramp, + &duty, &duty_ramp, + &vib_strength, &vib_speed, + &env_attack, &env_sustain, &env_decay, &env_punch, + &lpf_freq, &lpf_ramp, &lpf_resonance, + &hpf_freq, &hpf_ramp, + &pha_offset, &pha_ramp, + &repeat_speed, + &arp_speed, &arp_mod)) { + return NULL; + } + + // Get seed + uint32_t seed = 0; + bool hasSeed = false; + if (seed_obj != Py_None) { + if (PyLong_Check(seed_obj)) { + seed = static_cast(PyLong_AsUnsignedLong(seed_obj)); + if (PyErr_Occurred()) return NULL; + hasSeed = true; + } else { + PyErr_SetString(PyExc_TypeError, "seed must be an integer"); + return NULL; + } + } + + SfxrParams params; + + if (preset) { + // Generate from preset + std::string presetName(preset); + std::mt19937 rng; + if (hasSeed) { + rng.seed(seed); + } else { + std::random_device rd; + rng.seed(rd()); + } + + if (!sfxr_preset(presetName, params, rng)) { + PyErr_Format(PyExc_ValueError, + "Unknown sfxr preset '%s'. Valid: coin, laser, explosion, powerup, hurt, jump, blip", + preset); + return NULL; + } + } else { + // Custom params - start with defaults + params = SfxrParams(); + if (wave_type != -999) params.wave_type = wave_type; + if (base_freq != -999) params.base_freq = static_cast(base_freq); + if (freq_limit != -999) params.freq_limit = static_cast(freq_limit); + if (freq_ramp != -999) params.freq_ramp = static_cast(freq_ramp); + if (freq_dramp != -999) params.freq_dramp = static_cast(freq_dramp); + if (duty != -999) params.duty = static_cast(duty); + if (duty_ramp != -999) params.duty_ramp = static_cast(duty_ramp); + if (vib_strength != -999) params.vib_strength = static_cast(vib_strength); + if (vib_speed != -999) params.vib_speed = static_cast(vib_speed); + if (env_attack != -999) params.env_attack = static_cast(env_attack); + if (env_sustain != -999) params.env_sustain = static_cast(env_sustain); + if (env_decay != -999) params.env_decay = static_cast(env_decay); + if (env_punch != -999) 
params.env_punch = static_cast(env_punch); + if (lpf_freq != -999) params.lpf_freq = static_cast(lpf_freq); + if (lpf_ramp != -999) params.lpf_ramp = static_cast(lpf_ramp); + if (lpf_resonance != -999) params.lpf_resonance = static_cast(lpf_resonance); + if (hpf_freq != -999) params.hpf_freq = static_cast(hpf_freq); + if (hpf_ramp != -999) params.hpf_ramp = static_cast(hpf_ramp); + if (pha_offset != -999) params.pha_offset = static_cast(pha_offset); + if (pha_ramp != -999) params.pha_ramp = static_cast(pha_ramp); + if (repeat_speed != -999) params.repeat_speed = static_cast(repeat_speed); + if (arp_speed != -999) params.arp_speed = static_cast(arp_speed); + if (arp_mod != -999) params.arp_mod = static_cast(arp_mod); + } + + // Synthesize + std::vector samples = sfxr_synthesize(params); + + auto data = std::make_shared(std::move(samples), 44100, 1); + data->sfxrParams = std::make_shared(params); + return PySoundBuffer_from_data(std::move(data)); +} + +// ============================================================================ +// DSP effect methods (each returns new SoundBuffer) +// ============================================================================ + +PyObject* PySoundBuffer::pitch_shift(PySoundBufferObject* self, PyObject* args) { + double factor; + if (!PyArg_ParseTuple(args, "d", &factor)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + if (factor <= 0.0) { PyErr_SetString(PyExc_ValueError, "pitch factor must be positive"); return NULL; } + + auto result = AudioEffects::pitchShift(self->data->samples, self->data->channels, factor); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::low_pass(PySoundBufferObject* self, PyObject* args) { + double cutoff; + if (!PyArg_ParseTuple(args, "d", &cutoff)) return NULL; + if (!self->data) { 
PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::lowPass(self->data->samples, self->data->sampleRate, self->data->channels, cutoff); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::high_pass(PySoundBufferObject* self, PyObject* args) { + double cutoff; + if (!PyArg_ParseTuple(args, "d", &cutoff)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::highPass(self->data->samples, self->data->sampleRate, self->data->channels, cutoff); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::echo(PySoundBufferObject* self, PyObject* args) { + double delay_ms, feedback, wet; + if (!PyArg_ParseTuple(args, "ddd", &delay_ms, &feedback, &wet)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::echo(self->data->samples, self->data->sampleRate, self->data->channels, + delay_ms, feedback, wet); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::reverb(PySoundBufferObject* self, PyObject* args) { + double room_size, damping, wet; + if (!PyArg_ParseTuple(args, "ddd", &room_size, &damping, &wet)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::reverb(self->data->samples, self->data->sampleRate, self->data->channels, + room_size, damping, wet); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return 
PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::distortion(PySoundBufferObject* self, PyObject* args) { + double drive; + if (!PyArg_ParseTuple(args, "d", &drive)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::distortion(self->data->samples, drive); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::bit_crush(PySoundBufferObject* self, PyObject* args) { + int bits, rateDiv; + if (!PyArg_ParseTuple(args, "ii", &bits, &rateDiv)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::bitCrush(self->data->samples, bits, rateDiv); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::normalize(PySoundBufferObject* self, PyObject* args) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::normalize(self->data->samples); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::reverse(PySoundBufferObject* self, PyObject* args) { + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::reverse(self->data->samples, self->data->channels); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::slice(PySoundBufferObject* self, PyObject* args) { + double startSec, endSec; + if (!PyArg_ParseTuple(args, "dd", &startSec, &endSec)) return NULL; 
+ if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + + auto result = AudioEffects::slice(self->data->samples, self->data->sampleRate, self->data->channels, + startSec, endSec); + auto data = std::make_shared(std::move(result), self->data->sampleRate, self->data->channels); + return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::sfxr_mutate(PySoundBufferObject* self, PyObject* args) { + double amount = 0.05; + PyObject* seed_obj = Py_None; + if (!PyArg_ParseTuple(args, "|dO", &amount, &seed_obj)) return NULL; + if (!self->data) { PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer"); return NULL; } + if (!self->data->sfxrParams) { + PyErr_SetString(PyExc_RuntimeError, "SoundBuffer was not created with sfxr - no params to mutate"); + return NULL; + } + + std::mt19937 rng; + if (seed_obj != Py_None && PyLong_Check(seed_obj)) { + rng.seed(static_cast(PyLong_AsUnsignedLong(seed_obj))); + } else { + std::random_device rd; + rng.seed(rd()); + } + + SfxrParams mutated = sfxr_mutate_params(*self->data->sfxrParams, static_cast(amount), rng); + std::vector samples = sfxr_synthesize(mutated); + + auto data = std::make_shared(std::move(samples), 44100, 1); + data->sfxrParams = std::make_shared(mutated); + return PySoundBuffer_from_data(std::move(data)); +} + +// ============================================================================ +// Composition class methods +// ============================================================================ + +PyObject* PySoundBuffer::concat(PyObject* cls, PyObject* args, PyObject* kwds) { + static const char* keywords[] = {"buffers", "overlap", nullptr}; + PyObject* bufList; + double overlap = 0.0; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|d", const_cast(keywords), + &bufList, &overlap)) { + return NULL; + } + + if (!PySequence_Check(bufList)) { + PyErr_SetString(PyExc_TypeError, "buffers must be a sequence of SoundBuffer objects"); + return NULL; + } + 
+ Py_ssize_t count = PySequence_Size(bufList); + if (count <= 0) { + PyErr_SetString(PyExc_ValueError, "buffers must not be empty"); + return NULL; + } + + // Gather all buffer data + std::vector> buffers; + for (Py_ssize_t i = 0; i < count; i++) { + PyObject* item = PySequence_GetItem(bufList, i); + if (!item || !PyObject_IsInstance(item, (PyObject*)&mcrfpydef::PySoundBufferType)) { + Py_XDECREF(item); + PyErr_SetString(PyExc_TypeError, "All items must be SoundBuffer objects"); + return NULL; + } + auto* sbObj = (PySoundBufferObject*)item; + if (!sbObj->data) { + Py_DECREF(item); + PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer in list"); + return NULL; + } + buffers.push_back(sbObj->data); + Py_DECREF(item); + } + + // Verify matching channels + unsigned int ch = buffers[0]->channels; + unsigned int rate = buffers[0]->sampleRate; + for (auto& b : buffers) { + if (b->channels != ch) { + PyErr_SetString(PyExc_ValueError, "All buffers must have the same number of channels"); + return NULL; + } + } + + // Build concatenated samples with optional crossfade overlap + size_t overlapSamples = static_cast(overlap * rate * ch); + + std::vector result; + for (size_t i = 0; i < buffers.size(); i++) { + auto& src = buffers[i]->samples; + if (i == 0 || overlapSamples == 0 || result.size() < overlapSamples) { + result.insert(result.end(), src.begin(), src.end()); + } else { + // Crossfade overlap region + size_t ovl = std::min(overlapSamples, std::min(result.size(), src.size())); + size_t startInResult = result.size() - ovl; + for (size_t j = 0; j < ovl; j++) { + float fade = static_cast(j) / static_cast(ovl); + float a = result[startInResult + j] * (1.0f - fade); + float b = src[j] * fade; + result[startInResult + j] = static_cast(std::max(-32768.0f, std::min(32767.0f, a + b))); + } + // Append remaining + if (ovl < src.size()) { + result.insert(result.end(), src.begin() + ovl, src.end()); + } + } + } + + auto data = std::make_shared(std::move(result), rate, ch); + 
return PySoundBuffer_from_data(std::move(data)); +} + +PyObject* PySoundBuffer::mix(PyObject* cls, PyObject* args, PyObject* kwds) { + static const char* keywords[] = {"buffers", nullptr}; + PyObject* bufList; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O", const_cast(keywords), &bufList)) { + return NULL; + } + + if (!PySequence_Check(bufList)) { + PyErr_SetString(PyExc_TypeError, "buffers must be a sequence of SoundBuffer objects"); + return NULL; + } + + Py_ssize_t count = PySequence_Size(bufList); + if (count <= 0) { + PyErr_SetString(PyExc_ValueError, "buffers must not be empty"); + return NULL; + } + + std::vector> buffers; + for (Py_ssize_t i = 0; i < count; i++) { + PyObject* item = PySequence_GetItem(bufList, i); + if (!item || !PyObject_IsInstance(item, (PyObject*)&mcrfpydef::PySoundBufferType)) { + Py_XDECREF(item); + PyErr_SetString(PyExc_TypeError, "All items must be SoundBuffer objects"); + return NULL; + } + auto* sbObj = (PySoundBufferObject*)item; + if (!sbObj->data) { + Py_DECREF(item); + PyErr_SetString(PyExc_RuntimeError, "Invalid SoundBuffer in list"); + return NULL; + } + buffers.push_back(sbObj->data); + Py_DECREF(item); + } + + unsigned int ch = buffers[0]->channels; + unsigned int rate = buffers[0]->sampleRate; + + // Find longest buffer + size_t maxLen = 0; + for (auto& b : buffers) maxLen = std::max(maxLen, b->samples.size()); + + // Mix: sum and clamp + std::vector result(maxLen, 0); + for (auto& b : buffers) { + for (size_t i = 0; i < b->samples.size(); i++) { + int32_t sum = static_cast(result[i]) + static_cast(b->samples[i]); + result[i] = static_cast(std::max(-32768, std::min(32767, sum))); + } + } + + auto data = std::make_shared(std::move(result), rate, ch); + return PySoundBuffer_from_data(std::move(data)); +} + +// ============================================================================ +// Method/GetSet tables +// ============================================================================ + +PyMethodDef 
PySoundBuffer::methods[] = { + // Class methods (factories) + {"from_samples", (PyCFunction)PySoundBuffer::from_samples, METH_VARARGS | METH_KEYWORDS | METH_CLASS, + MCRF_METHOD(SoundBuffer, from_samples, + MCRF_SIG("(data: bytes, channels: int, sample_rate: int)", "SoundBuffer"), + MCRF_DESC("Create a SoundBuffer from raw int16 PCM sample data."), + MCRF_ARGS_START + MCRF_ARG("data", "Raw PCM data as bytes (int16 little-endian)") + MCRF_ARG("channels", "Number of audio channels (1=mono, 2=stereo)") + MCRF_ARG("sample_rate", "Sample rate in Hz (e.g. 44100)") + )}, + {"tone", (PyCFunction)PySoundBuffer::tone, METH_VARARGS | METH_KEYWORDS | METH_CLASS, + MCRF_METHOD(SoundBuffer, tone, + MCRF_SIG("(frequency: float, duration: float, waveform: str = 'sine', ...)", "SoundBuffer"), + MCRF_DESC("Generate a tone with optional ADSR envelope."), + MCRF_ARGS_START + MCRF_ARG("frequency", "Frequency in Hz") + MCRF_ARG("duration", "Duration in seconds") + MCRF_ARG("waveform", "One of: sine, square, saw, triangle, noise") + MCRF_ARG("attack", "ADSR attack time in seconds (default 0.01)") + MCRF_ARG("decay", "ADSR decay time in seconds (default 0.0)") + MCRF_ARG("sustain", "ADSR sustain level 0.0-1.0 (default 1.0)") + MCRF_ARG("release", "ADSR release time in seconds (default 0.01)") + )}, + {"sfxr", (PyCFunction)PySoundBuffer::sfxr, METH_VARARGS | METH_KEYWORDS | METH_CLASS, + MCRF_METHOD(SoundBuffer, sfxr, + MCRF_SIG("(preset: str = None, seed: int = None, **params)", "SoundBuffer"), + MCRF_DESC("Generate retro sound effects using sfxr synthesis."), + MCRF_ARGS_START + MCRF_ARG("preset", "One of: coin, laser, explosion, powerup, hurt, jump, blip") + MCRF_ARG("seed", "Random seed for deterministic generation") + MCRF_RETURNS("SoundBuffer with sfxr_params set for later mutation") + )}, + {"concat", (PyCFunction)PySoundBuffer::concat, METH_VARARGS | METH_KEYWORDS | METH_CLASS, + MCRF_METHOD(SoundBuffer, concat, + MCRF_SIG("(buffers: list[SoundBuffer], overlap: float = 0.0)", 
"SoundBuffer"), + MCRF_DESC("Concatenate multiple SoundBuffers with optional crossfade overlap."), + MCRF_ARGS_START + MCRF_ARG("buffers", "List of SoundBuffer objects to concatenate") + MCRF_ARG("overlap", "Crossfade overlap duration in seconds") + )}, + {"mix", (PyCFunction)PySoundBuffer::mix, METH_VARARGS | METH_KEYWORDS | METH_CLASS, + MCRF_METHOD(SoundBuffer, mix, + MCRF_SIG("(buffers: list[SoundBuffer])", "SoundBuffer"), + MCRF_DESC("Mix multiple SoundBuffers together (additive, clamped)."), + MCRF_ARGS_START + MCRF_ARG("buffers", "List of SoundBuffer objects to mix") + )}, + + // Instance methods (DSP effects) + {"pitch_shift", (PyCFunction)PySoundBuffer::pitch_shift, METH_VARARGS, + MCRF_METHOD(SoundBuffer, pitch_shift, + MCRF_SIG("(factor: float)", "SoundBuffer"), + MCRF_DESC("Resample to shift pitch. factor>1 = higher+shorter.") + )}, + {"low_pass", (PyCFunction)PySoundBuffer::low_pass, METH_VARARGS, + MCRF_METHOD(SoundBuffer, low_pass, + MCRF_SIG("(cutoff_hz: float)", "SoundBuffer"), + MCRF_DESC("Apply single-pole IIR low-pass filter.") + )}, + {"high_pass", (PyCFunction)PySoundBuffer::high_pass, METH_VARARGS, + MCRF_METHOD(SoundBuffer, high_pass, + MCRF_SIG("(cutoff_hz: float)", "SoundBuffer"), + MCRF_DESC("Apply single-pole IIR high-pass filter.") + )}, + {"echo", (PyCFunction)PySoundBuffer::echo, METH_VARARGS, + MCRF_METHOD(SoundBuffer, echo, + MCRF_SIG("(delay_ms: float, feedback: float, wet: float)", "SoundBuffer"), + MCRF_DESC("Apply echo effect with delay, feedback, and wet/dry mix.") + )}, + {"reverb", (PyCFunction)PySoundBuffer::reverb, METH_VARARGS, + MCRF_METHOD(SoundBuffer, reverb, + MCRF_SIG("(room_size: float, damping: float, wet: float)", "SoundBuffer"), + MCRF_DESC("Apply simplified Freeverb-style reverb.") + )}, + {"distortion", (PyCFunction)PySoundBuffer::distortion, METH_VARARGS, + MCRF_METHOD(SoundBuffer, distortion, + MCRF_SIG("(drive: float)", "SoundBuffer"), + MCRF_DESC("Apply tanh soft clipping distortion.") + )}, + {"bit_crush", 
(PyCFunction)PySoundBuffer::bit_crush, METH_VARARGS, + MCRF_METHOD(SoundBuffer, bit_crush, + MCRF_SIG("(bits: int, rate_divisor: int)", "SoundBuffer"), + MCRF_DESC("Reduce bit depth and sample rate for lo-fi effect.") + )}, + {"normalize", (PyCFunction)PySoundBuffer::normalize, METH_NOARGS, + MCRF_METHOD(SoundBuffer, normalize, + MCRF_SIG("()", "SoundBuffer"), + MCRF_DESC("Scale samples to 95%% of int16 max.") + )}, + {"reverse", (PyCFunction)PySoundBuffer::reverse, METH_NOARGS, + MCRF_METHOD(SoundBuffer, reverse, + MCRF_SIG("()", "SoundBuffer"), + MCRF_DESC("Reverse the sample order.") + )}, + {"slice", (PyCFunction)PySoundBuffer::slice, METH_VARARGS, + MCRF_METHOD(SoundBuffer, slice, + MCRF_SIG("(start: float, end: float)", "SoundBuffer"), + MCRF_DESC("Extract a time range in seconds.") + )}, + {"sfxr_mutate", (PyCFunction)PySoundBuffer::sfxr_mutate, METH_VARARGS, + MCRF_METHOD(SoundBuffer, sfxr_mutate, + MCRF_SIG("(amount: float = 0.05, seed: int = None)", "SoundBuffer"), + MCRF_DESC("Jitter sfxr params and re-synthesize. 
Only works on sfxr-generated buffers.") + )}, + {NULL} +}; + +PyGetSetDef PySoundBuffer::getsetters[] = { + {"duration", (getter)PySoundBuffer::get_duration, NULL, + MCRF_PROPERTY(duration, "Total duration in seconds (read-only)."), NULL}, + {"sample_count", (getter)PySoundBuffer::get_sample_count, NULL, + MCRF_PROPERTY(sample_count, "Total number of samples (read-only)."), NULL}, + {"sample_rate", (getter)PySoundBuffer::get_sample_rate, NULL, + MCRF_PROPERTY(sample_rate, "Sample rate in Hz (read-only)."), NULL}, + {"channels", (getter)PySoundBuffer::get_channels, NULL, + MCRF_PROPERTY(channels, "Number of audio channels (read-only)."), NULL}, + {"sfxr_params", (getter)PySoundBuffer::get_sfxr_params, NULL, + MCRF_PROPERTY(sfxr_params, "Dict of sfxr parameters if sfxr-generated, else None (read-only)."), NULL}, + {NULL} +}; diff --git a/src/PySoundBuffer.h b/src/PySoundBuffer.h new file mode 100644 index 0000000..0e050ae --- /dev/null +++ b/src/PySoundBuffer.h @@ -0,0 +1,123 @@ +#pragma once +#include "Common.h" +#include "Python.h" +#include "McRFPy_Doc.h" +#include +#include +#include +#include + +// Forward declarations +struct SfxrParams; + +// Core audio data container - holds authoritative sample data +class SoundBufferData : public std::enable_shared_from_this +{ +public: + std::vector samples; + unsigned int sampleRate = 44100; + unsigned int channels = 1; + + // Optional sfxr params (set when created via sfxr synthesis) + std::shared_ptr sfxrParams; + + // Lazy sf::SoundBuffer rebuild + sf::SoundBuffer sfBuffer; + bool sfBufferDirty = true; + + SoundBufferData() = default; + SoundBufferData(std::vector&& s, unsigned int rate, unsigned int ch) + : samples(std::move(s)), sampleRate(rate), channels(ch), sfBufferDirty(true) {} + + // Rebuild sf::SoundBuffer from samples if dirty + sf::SoundBuffer& getSfBuffer() { + if (sfBufferDirty && !samples.empty()) { + sfBuffer.loadFromSamples(samples.data(), samples.size(), channels, sampleRate); + sfBufferDirty = false; 
+ } + return sfBuffer; + } + + float duration() const { + if (sampleRate == 0 || channels == 0 || samples.empty()) return 0.0f; + return static_cast(samples.size()) / static_cast(channels) / static_cast(sampleRate); + } +}; + +// Python object wrapper +typedef struct { + PyObject_HEAD + std::shared_ptr data; +} PySoundBufferObject; + +// Python type methods/getset declarations +namespace PySoundBuffer { + // tp_init, tp_new, tp_repr + int init(PySoundBufferObject* self, PyObject* args, PyObject* kwds); + PyObject* pynew(PyTypeObject* type, PyObject* args, PyObject* kwds); + PyObject* repr(PyObject* obj); + + // Class methods (factories) + PyObject* from_samples(PyObject* cls, PyObject* args, PyObject* kwds); + PyObject* tone(PyObject* cls, PyObject* args, PyObject* kwds); + PyObject* sfxr(PyObject* cls, PyObject* args, PyObject* kwds); + PyObject* concat(PyObject* cls, PyObject* args, PyObject* kwds); + PyObject* mix(PyObject* cls, PyObject* args, PyObject* kwds); + + // Instance methods (DSP - each returns new SoundBuffer) + PyObject* pitch_shift(PySoundBufferObject* self, PyObject* args); + PyObject* low_pass(PySoundBufferObject* self, PyObject* args); + PyObject* high_pass(PySoundBufferObject* self, PyObject* args); + PyObject* echo(PySoundBufferObject* self, PyObject* args); + PyObject* reverb(PySoundBufferObject* self, PyObject* args); + PyObject* distortion(PySoundBufferObject* self, PyObject* args); + PyObject* bit_crush(PySoundBufferObject* self, PyObject* args); + PyObject* normalize(PySoundBufferObject* self, PyObject* args); + PyObject* reverse(PySoundBufferObject* self, PyObject* args); + PyObject* slice(PySoundBufferObject* self, PyObject* args); + PyObject* sfxr_mutate(PySoundBufferObject* self, PyObject* args); + + // Properties + PyObject* get_duration(PySoundBufferObject* self, void* closure); + PyObject* get_sample_count(PySoundBufferObject* self, void* closure); + PyObject* get_sample_rate(PySoundBufferObject* self, void* closure); + PyObject* 
get_channels(PySoundBufferObject* self, void* closure); + PyObject* get_sfxr_params(PySoundBufferObject* self, void* closure); + + extern PyMethodDef methods[]; + extern PyGetSetDef getsetters[]; +} + +// Helper: create a new PySoundBufferObject wrapping given data +PyObject* PySoundBuffer_from_data(std::shared_ptr data); + +namespace mcrfpydef { + inline PyTypeObject PySoundBufferType = { + .ob_base = {.ob_base = {.ob_refcnt = 1, .ob_type = NULL}, .ob_size = 0}, + .tp_name = "mcrfpy.SoundBuffer", + .tp_basicsize = sizeof(PySoundBufferObject), + .tp_itemsize = 0, + .tp_repr = PySoundBuffer::repr, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = PyDoc_STR( + "SoundBuffer(filename: str)\n" + "SoundBuffer.from_samples(data: bytes, channels: int, sample_rate: int)\n" + "SoundBuffer.tone(frequency: float, duration: float, waveform: str = 'sine', ...)\n" + "SoundBuffer.sfxr(preset: str, seed: int = None)\n\n" + "Audio sample buffer for procedural audio generation and effects.\n\n" + "Holds PCM sample data that can be created from files, raw samples,\n" + "tone synthesis, or sfxr presets. Effect methods return new SoundBuffer\n" + "instances (copy-modify pattern).\n\n" + "Properties:\n" + " duration (float, read-only): Duration in seconds.\n" + " sample_count (int, read-only): Total number of samples.\n" + " sample_rate (int, read-only): Samples per second (e.g. 
44100).\n" + " channels (int, read-only): Number of audio channels.\n" + " sfxr_params (dict or None, read-only): sfxr parameters if sfxr-generated.\n" + ), + .tp_methods = PySoundBuffer::methods, + .tp_getset = PySoundBuffer::getsetters, + .tp_init = (initproc)PySoundBuffer::init, + .tp_new = PySoundBuffer::pynew, + }; +} diff --git a/src/audio/AudioEffects.cpp b/src/audio/AudioEffects.cpp new file mode 100644 index 0000000..98f5463 --- /dev/null +++ b/src/audio/AudioEffects.cpp @@ -0,0 +1,336 @@ +#include "AudioEffects.h" +#include +#include +#include + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +namespace AudioEffects { + +// ============================================================================ +// Pitch shift via linear interpolation resampling +// ============================================================================ + +std::vector pitchShift(const std::vector& samples, unsigned int channels, double factor) { + if (samples.empty() || factor <= 0.0) return samples; + + size_t frames = samples.size() / channels; + size_t newFrames = static_cast(frames / factor); + if (newFrames == 0) newFrames = 1; + + std::vector result(newFrames * channels); + + for (size_t i = 0; i < newFrames; i++) { + double srcPos = i * factor; + size_t idx0 = static_cast(srcPos); + double frac = srcPos - idx0; + size_t idx1 = std::min(idx0 + 1, frames - 1); + + for (unsigned int ch = 0; ch < channels; ch++) { + double s0 = samples[idx0 * channels + ch]; + double s1 = samples[idx1 * channels + ch]; + double interp = s0 + (s1 - s0) * frac; + result[i * channels + ch] = static_cast(std::max(-32768.0, std::min(32767.0, interp))); + } + } + + return result; +} + +// ============================================================================ +// Low-pass filter (single-pole IIR) +// ============================================================================ + +std::vector lowPass(const std::vector& samples, unsigned int sampleRate, unsigned int channels, double 
cutoffHz) { + if (samples.empty()) return samples; + + double rc = 1.0 / (2.0 * M_PI * cutoffHz); + double dt = 1.0 / sampleRate; + double alpha = dt / (rc + dt); + + std::vector result(samples.size()); + std::vector prev(channels, 0.0); + + size_t frames = samples.size() / channels; + for (size_t i = 0; i < frames; i++) { + for (unsigned int ch = 0; ch < channels; ch++) { + double input = samples[i * channels + ch]; + prev[ch] = prev[ch] + alpha * (input - prev[ch]); + result[i * channels + ch] = static_cast(std::max(-32768.0, std::min(32767.0, prev[ch]))); + } + } + + return result; +} + +// ============================================================================ +// High-pass filter (complement of low-pass) +// ============================================================================ + +std::vector highPass(const std::vector& samples, unsigned int sampleRate, unsigned int channels, double cutoffHz) { + if (samples.empty()) return samples; + + double rc = 1.0 / (2.0 * M_PI * cutoffHz); + double dt = 1.0 / sampleRate; + double alpha = rc / (rc + dt); + + std::vector result(samples.size()); + std::vector prevIn(channels, 0.0); + std::vector prevOut(channels, 0.0); + + size_t frames = samples.size() / channels; + for (size_t i = 0; i < frames; i++) { + for (unsigned int ch = 0; ch < channels; ch++) { + double input = samples[i * channels + ch]; + prevOut[ch] = alpha * (prevOut[ch] + input - prevIn[ch]); + prevIn[ch] = input; + result[i * channels + ch] = static_cast(std::max(-32768.0, std::min(32767.0, prevOut[ch]))); + } + } + + return result; +} + +// ============================================================================ +// Echo (circular delay buffer with feedback) +// ============================================================================ + +std::vector echo(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double delayMs, double feedback, double wet) { + if (samples.empty()) return samples; + + size_t delaySamples 
= static_cast(delayMs * sampleRate * channels / 1000.0); + if (delaySamples == 0) return samples; + + std::vector delay(delaySamples, 0.0); + std::vector result(samples.size()); + size_t pos = 0; + + for (size_t i = 0; i < samples.size(); i++) { + double input = samples[i]; + double delayed = delay[pos % delaySamples]; + double output = input + delayed * wet; + delay[pos % delaySamples] = input + delayed * feedback; + result[i] = static_cast(std::max(-32768.0, std::min(32767.0, output))); + pos++; + } + + return result; +} + +// ============================================================================ +// Reverb (simplified Freeverb: 4 comb filters + 2 allpass) +// ============================================================================ + +namespace { + struct CombFilter { + std::vector buffer; + size_t pos = 0; + double filterStore = 0.0; + + CombFilter(size_t size) : buffer(size, 0.0) {} + + double process(double input, double feedback, double damp) { + double output = buffer[pos]; + filterStore = output * (1.0 - damp) + filterStore * damp; + buffer[pos] = input + filterStore * feedback; + pos = (pos + 1) % buffer.size(); + return output; + } + }; + + struct AllpassFilter { + std::vector buffer; + size_t pos = 0; + + AllpassFilter(size_t size) : buffer(size, 0.0) {} + + double process(double input) { + double buffered = buffer[pos]; + double output = -input + buffered; + buffer[pos] = input + buffered * 0.5; + pos = (pos + 1) % buffer.size(); + return output; + } + }; +} + +std::vector reverb(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double roomSize, double damping, double wet) { + if (samples.empty()) return samples; + + // Comb filter delays (in samples, scaled for sample rate) + double scale = sampleRate / 44100.0; + size_t combSizes[4] = { + static_cast(1116 * scale), + static_cast(1188 * scale), + static_cast(1277 * scale), + static_cast(1356 * scale) + }; + size_t allpassSizes[2] = { + static_cast(556 * scale), + 
static_cast(441 * scale) + }; + + CombFilter combs[4] = { + CombFilter(combSizes[0]), CombFilter(combSizes[1]), + CombFilter(combSizes[2]), CombFilter(combSizes[3]) + }; + AllpassFilter allpasses[2] = { + AllpassFilter(allpassSizes[0]), AllpassFilter(allpassSizes[1]) + }; + + double feedback = roomSize * 0.9 + 0.05; + double dry = 1.0 - wet; + + std::vector result(samples.size()); + + // Process mono (mix channels if stereo, then duplicate) + for (size_t i = 0; i < samples.size(); i += channels) { + // Mix to mono for reverb processing + double mono = 0.0; + for (unsigned int ch = 0; ch < channels; ch++) { + mono += samples[i + ch]; + } + mono /= channels; + mono /= 32768.0; // Normalize to -1..1 + + // Parallel comb filters + double reverbSample = 0.0; + for (int c = 0; c < 4; c++) { + reverbSample += combs[c].process(mono, feedback, damping); + } + + // Series allpass filters + for (int a = 0; a < 2; a++) { + reverbSample = allpasses[a].process(reverbSample); + } + + // Mix wet/dry and write to all channels + for (unsigned int ch = 0; ch < channels; ch++) { + double original = samples[i + ch] / 32768.0; + double output = original * dry + reverbSample * wet; + result[i + ch] = static_cast(std::max(-32768.0, std::min(32767.0, output * 32768.0))); + } + } + + return result; +} + +// ============================================================================ +// Distortion (tanh soft clip) +// ============================================================================ + +std::vector distortion(const std::vector& samples, double drive) { + if (samples.empty()) return samples; + + std::vector result(samples.size()); + for (size_t i = 0; i < samples.size(); i++) { + double s = samples[i] / 32768.0; + s = std::tanh(s * drive); + result[i] = static_cast(std::max(-32768.0, std::min(32767.0, s * 32768.0))); + } + return result; +} + +// ============================================================================ +// Bit crush (quantize + sample rate reduce) +// 
============================================================================ + +std::vector bitCrush(const std::vector& samples, int bits, int rateDivisor) { + if (samples.empty()) return samples; + + bits = std::max(1, std::min(16, bits)); + rateDivisor = std::max(1, rateDivisor); + + int levels = 1 << bits; + double quantStep = 65536.0 / levels; + + std::vector result(samples.size()); + int16_t held = 0; + + for (size_t i = 0; i < samples.size(); i++) { + if (i % rateDivisor == 0) { + // Quantize + double s = samples[i] + 32768.0; // Shift to 0..65536 + s = std::floor(s / quantStep) * quantStep; + held = static_cast(s - 32768.0); + } + result[i] = held; + } + + return result; +} + +// ============================================================================ +// Normalize (scale to 95% of int16 max) +// ============================================================================ + +std::vector normalize(const std::vector& samples) { + if (samples.empty()) return samples; + + int16_t peak = 0; + for (auto s : samples) { + int16_t abs_s = (s < 0) ? 
static_cast(-s) : s; + if (abs_s > peak) peak = abs_s; + } + + if (peak == 0) return samples; + + double scale = 31128.0 / peak; // 95% of 32767 + std::vector result(samples.size()); + for (size_t i = 0; i < samples.size(); i++) { + double s = samples[i] * scale; + result[i] = static_cast(std::max(-32768.0, std::min(32767.0, s))); + } + + return result; +} + +// ============================================================================ +// Reverse (frame-aware for multichannel) +// ============================================================================ + +std::vector reverse(const std::vector& samples, unsigned int channels) { + if (samples.empty()) return samples; + + size_t frames = samples.size() / channels; + std::vector result(samples.size()); + + for (size_t i = 0; i < frames; i++) { + size_t srcFrame = frames - 1 - i; + for (unsigned int ch = 0; ch < channels; ch++) { + result[i * channels + ch] = samples[srcFrame * channels + ch]; + } + } + + return result; +} + +// ============================================================================ +// Slice (extract sub-range by time) +// ============================================================================ + +std::vector slice(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double startSec, double endSec) { + if (samples.empty()) return {}; + + size_t frames = samples.size() / channels; + size_t startFrame = static_cast(std::max(0.0, startSec) * sampleRate); + size_t endFrame = static_cast(std::max(0.0, endSec) * sampleRate); + + startFrame = std::min(startFrame, frames); + endFrame = std::min(endFrame, frames); + + if (startFrame >= endFrame) return {}; + + size_t numFrames = endFrame - startFrame; + std::vector result(numFrames * channels); + std::memcpy(result.data(), &samples[startFrame * channels], numFrames * channels * sizeof(int16_t)); + + return result; +} + +} // namespace AudioEffects diff --git a/src/audio/AudioEffects.h b/src/audio/AudioEffects.h new file 
mode 100644 index 0000000..d02f0ec --- /dev/null +++ b/src/audio/AudioEffects.h @@ -0,0 +1,42 @@ +#pragma once +#include +#include + +// Pure DSP functions: vector -> vector +// All return NEW vectors, never modify input. +namespace AudioEffects { + +// Resample to shift pitch. factor>1 = higher pitch + shorter duration. +std::vector pitchShift(const std::vector& samples, unsigned int channels, double factor); + +// Single-pole IIR low-pass filter +std::vector lowPass(const std::vector& samples, unsigned int sampleRate, unsigned int channels, double cutoffHz); + +// High-pass filter (complement of low-pass) +std::vector highPass(const std::vector& samples, unsigned int sampleRate, unsigned int channels, double cutoffHz); + +// Delay-line echo with feedback +std::vector echo(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double delayMs, double feedback, double wet); + +// Simplified Freeverb: 4 comb filters + 2 allpass +std::vector reverb(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double roomSize, double damping, double wet); + +// tanh soft clipping +std::vector distortion(const std::vector& samples, double drive); + +// Reduce bit depth and sample rate +std::vector bitCrush(const std::vector& samples, int bits, int rateDivisor); + +// Scale to 95% of int16 max +std::vector normalize(const std::vector& samples); + +// Reverse sample order (frame-aware for multichannel) +std::vector reverse(const std::vector& samples, unsigned int channels); + +// Extract sub-range by time offsets +std::vector slice(const std::vector& samples, unsigned int sampleRate, unsigned int channels, + double startSec, double endSec); + +} // namespace AudioEffects diff --git a/src/audio/SfxrSynth.cpp b/src/audio/SfxrSynth.cpp new file mode 100644 index 0000000..88470e3 --- /dev/null +++ b/src/audio/SfxrSynth.cpp @@ -0,0 +1,499 @@ +#include "SfxrSynth.h" +#include +#include +#include + +#ifndef M_PI +#define M_PI 
3.14159265358979323846 +#endif + +// ============================================================================ +// sfxr synthesis engine +// Based on the original sfxr by DrPetter +// 8x supersampled, 44100 Hz mono output +// ============================================================================ + +std::vector sfxr_synthesize(const SfxrParams& p) { + // Convert parameters to internal representation + const int OVERSAMPLE = 8; + const int SAMPLE_RATE = 44100; + + double fperiod; + double fmaxperiod; + double fslide; + double fdslide; + int period; + double square_duty; + double square_slide; + + // Envelope + int env_length[3]; + double env_vol; + int env_stage; + int env_time; + + // Vibrato + double vib_phase; + double vib_speed; + double vib_amp; + + // Low-pass filter + double fltp; + double fltdp; + double fltw; + double fltw_d; + double fltdmp; + double fltphp; + double flthp; + double flthp_d; + + // Phaser + double phaser_buffer[1024]; + int phaser_pos; + double phaser_offset; + double phaser_delta; + + // Noise buffer + double noise_buffer[32]; + + // Arpeggio + double arp_time; + double arp_limit; + double arp_mod; + + // Repeat + double rep_time; + double rep_limit; + + int phase; + + // Initialize + auto reset = [&](bool restart) { + if (!restart) { + phase = 0; + } + fperiod = 100.0 / (p.base_freq * p.base_freq + 0.001); + period = static_cast(fperiod); + fmaxperiod = 100.0 / (p.freq_limit * p.freq_limit + 0.001); + fslide = 1.0 - std::pow(p.freq_ramp, 3.0) * 0.01; + fdslide = -std::pow(p.freq_dramp, 3.0) * 0.000001; + square_duty = 0.5 - p.duty * 0.5; + square_slide = -p.duty_ramp * 0.00005; + + if (p.arp_mod >= 0.0f) { + arp_mod = 1.0 - std::pow(p.arp_mod, 2.0) * 0.9; + } else { + arp_mod = 1.0 + std::pow(p.arp_mod, 2.0) * 10.0; + } + arp_time = 0; + arp_limit = (p.arp_speed == 1.0f) ? 
0 : static_cast(std::pow(1.0 - p.arp_speed, 2.0) * 20000 + 32); + + if (!restart) { + // Noise buffer + for (int i = 0; i < 32; i++) { + noise_buffer[i] = ((std::rand() % 20001) / 10000.0) - 1.0; + } + + // Phaser + std::memset(phaser_buffer, 0, sizeof(phaser_buffer)); + phaser_pos = 0; + phaser_offset = std::pow(p.pha_offset, 2.0) * 1020.0; + if (p.pha_offset < 0.0f) phaser_offset = -phaser_offset; + phaser_delta = std::pow(p.pha_ramp, 2.0) * 1.0; + if (p.pha_ramp < 0.0f) phaser_delta = -phaser_delta; + + // Filter + fltp = 0.0; + fltdp = 0.0; + fltw = std::pow(p.lpf_freq, 3.0) * 0.1; + fltw_d = 1.0 + p.lpf_ramp * 0.0001; + fltdmp = 5.0 / (1.0 + std::pow(p.lpf_resonance, 2.0) * 20.0) * (0.01 + fltw); + if (fltdmp > 0.8) fltdmp = 0.8; + fltphp = 0.0; + flthp = std::pow(p.hpf_freq, 2.0) * 0.1; + flthp_d = 1.0 + p.hpf_ramp * 0.0003; + + // Vibrato + vib_phase = 0.0; + vib_speed = std::pow(p.vib_speed, 2.0) * 0.01; + vib_amp = p.vib_strength * 0.5; + + // Envelope + env_vol = 0.0; + env_stage = 0; + env_time = 0; + env_length[0] = static_cast(p.env_attack * p.env_attack * 100000.0); + env_length[1] = static_cast(p.env_sustain * p.env_sustain * 100000.0); + env_length[2] = static_cast(p.env_decay * p.env_decay * 100000.0); + + // Repeat + rep_time = 0; + rep_limit = (p.repeat_speed == 0.0f) ? 
0 : + static_cast(std::pow(1.0 - p.repeat_speed, 2.0) * 20000 + 32); + } + }; + + // Seed RNG deterministically based on params + std::srand(42); + + reset(false); + + // Generate samples - max 4 seconds of audio + int maxSamples = SAMPLE_RATE * 4; + std::vector output; + output.reserve(maxSamples); + + for (int si = 0; si < maxSamples; si++) { + // Repeat + rep_time++; + if (rep_limit != 0 && rep_time >= rep_limit) { + rep_time = 0; + reset(true); + } + + // Arpeggio + arp_time++; + if (arp_limit != 0 && arp_time >= arp_limit) { + arp_limit = 0; + fperiod *= arp_mod; + } + + // Frequency slide + fslide += fdslide; + fperiod *= fslide; + if (fperiod > fmaxperiod) { + fperiod = fmaxperiod; + if (p.freq_limit > 0.0f) { + // Sound has ended + break; + } + } + + // Vibrato + double rfperiod = fperiod; + if (vib_amp > 0.0) { + vib_phase += vib_speed; + rfperiod = fperiod * (1.0 + std::sin(vib_phase) * vib_amp); + } + period = static_cast(rfperiod); + if (period < 8) period = 8; + + // Duty cycle + square_duty += square_slide; + if (square_duty < 0.0) square_duty = 0.0; + if (square_duty > 0.5) square_duty = 0.5; + + // Envelope + env_time++; + if (env_time > env_length[env_stage]) { + env_time = 0; + env_stage++; + if (env_stage == 3) { + break; // Sound complete + } + } + if (env_stage == 0) { + env_vol = (env_length[0] > 0) ? 
+ static_cast(env_time) / env_length[0] : 1.0; + } else if (env_stage == 1) { + env_vol = 1.0 + (1.0 - static_cast(env_time) / std::max(1, env_length[1])) * 2.0 * p.env_punch; + } else { + env_vol = 1.0 - static_cast(env_time) / std::max(1, env_length[2]); + } + + // Phaser + phaser_offset += phaser_delta; + int iphaser_offset = std::abs(static_cast(phaser_offset)); + if (iphaser_offset > 1023) iphaser_offset = 1023; + + // Filter + if (flthp_d != 0.0) { + flthp *= flthp_d; + if (flthp < 0.00001) flthp = 0.00001; + if (flthp > 0.1) flthp = 0.1; + } + + // 8x supersampling + double ssample = 0.0; + for (int si2 = 0; si2 < OVERSAMPLE; si2++) { + double sample = 0.0; + phase++; + double fphase = static_cast(phase) / period; + + // Waveform generation + switch (p.wave_type) { + case 0: // Square + sample = (fphase < square_duty) ? 0.5 : -0.5; + break; + case 1: // Sawtooth + sample = 1.0 - fphase * 2.0; + break; + case 2: // Sine + sample = std::sin(fphase * 2.0 * M_PI); + break; + case 3: // Noise + sample = noise_buffer[static_cast(fphase * 32) % 32]; + break; + } + + // Low-pass filter + double pp = fltp; + fltw *= fltw_d; + if (fltw < 0.0) fltw = 0.0; + if (fltw > 0.1) fltw = 0.1; + if (p.lpf_freq != 1.0f) { + fltdp += (sample - fltp) * fltw; + fltdp -= fltdp * fltdmp; + } else { + fltp = sample; + fltdp = 0.0; + } + fltp += fltdp; + + // High-pass filter + fltphp += fltp - pp; + fltphp -= fltphp * flthp; + sample = fltphp; + + // Phaser + phaser_buffer[phaser_pos & 1023] = sample; + sample += phaser_buffer[(phaser_pos - iphaser_offset + 1024) & 1023]; + phaser_pos = (phaser_pos + 1) & 1023; + + // Accumulate + ssample += sample * env_vol; + } + + // Average supersamples and scale + ssample = ssample / OVERSAMPLE * 0.2; // master_vol + ssample *= 2.0; // Boost + + // Clamp + if (ssample > 1.0) ssample = 1.0; + if (ssample < -1.0) ssample = -1.0; + + output.push_back(static_cast(ssample * 32000.0)); + } + + return output; +} + +// 
============================================================================ +// Presets +// ============================================================================ + +static float rnd(std::mt19937& rng, float range) { + std::uniform_real_distribution dist(0.0f, range); + return dist(rng); +} + +static float rnd01(std::mt19937& rng) { + return rnd(rng, 1.0f); +} + +bool sfxr_preset(const std::string& name, SfxrParams& p, std::mt19937& rng) { + p = SfxrParams(); // Reset to defaults + + if (name == "coin" || name == "pickup") { + p.base_freq = 0.4f + rnd(rng, 0.5f); + p.env_attack = 0.0f; + p.env_sustain = rnd(rng, 0.1f); + p.env_decay = 0.1f + rnd(rng, 0.4f); + p.env_punch = 0.3f + rnd(rng, 0.3f); + if (rnd01(rng) < 0.5f) { + p.arp_speed = 0.5f + rnd(rng, 0.2f); + p.arp_mod = 0.2f + rnd(rng, 0.4f); + } + } + else if (name == "laser" || name == "shoot") { + p.wave_type = static_cast(rnd(rng, 3.0f)); + if (p.wave_type == 2 && rnd01(rng) < 0.5f) + p.wave_type = static_cast(rnd(rng, 2.0f)); + p.base_freq = 0.5f + rnd(rng, 0.5f); + p.freq_limit = std::max(0.2f, p.base_freq - 0.2f - rnd(rng, 0.6f)); + p.freq_ramp = -0.15f - rnd(rng, 0.2f); + if (rnd01(rng) < 0.33f) { + p.base_freq = 0.3f + rnd(rng, 0.6f); + p.freq_limit = rnd(rng, 0.1f); + p.freq_ramp = -0.35f - rnd(rng, 0.3f); + } + if (rnd01(rng) < 0.5f) { + p.duty = rnd(rng, 0.5f); + p.duty_ramp = rnd(rng, 0.2f); + } else { + p.duty = 0.4f + rnd(rng, 0.5f); + p.duty_ramp = -rnd(rng, 0.7f); + } + p.env_attack = 0.0f; + p.env_sustain = 0.1f + rnd(rng, 0.2f); + p.env_decay = rnd(rng, 0.4f); + if (rnd01(rng) < 0.5f) p.env_punch = rnd(rng, 0.3f); + if (rnd01(rng) < 0.33f) { + p.pha_offset = rnd(rng, 0.2f); + p.pha_ramp = -rnd(rng, 0.2f); + } + if (rnd01(rng) < 0.5f) p.hpf_freq = rnd(rng, 0.3f); + } + else if (name == "explosion") { + p.wave_type = 3; // noise + if (rnd01(rng) < 0.5f) { + p.base_freq = 0.1f + rnd(rng, 0.4f); + p.freq_ramp = -0.1f + rnd(rng, 0.4f); + } else { + p.base_freq = 0.2f + rnd(rng, 0.7f); + 
p.freq_ramp = -0.2f - rnd(rng, 0.2f); + } + p.base_freq *= p.base_freq; + if (rnd01(rng) < 0.2f) p.freq_ramp = 0.0f; + if (rnd01(rng) < 0.33f) p.repeat_speed = 0.3f + rnd(rng, 0.5f); + p.env_attack = 0.0f; + p.env_sustain = 0.1f + rnd(rng, 0.3f); + p.env_decay = rnd(rng, 0.5f); + if (rnd01(rng) < 0.5f) { + p.pha_offset = -0.3f + rnd(rng, 0.9f); + p.pha_ramp = -rnd(rng, 0.3f); + } + p.env_punch = 0.2f + rnd(rng, 0.6f); + if (rnd01(rng) < 0.5f) { + p.vib_strength = rnd(rng, 0.7f); + p.vib_speed = rnd(rng, 0.6f); + } + if (rnd01(rng) < 0.33f) { + p.arp_speed = 0.6f + rnd(rng, 0.3f); + p.arp_mod = 0.8f - rnd(rng, 1.6f); + } + } + else if (name == "powerup") { + if (rnd01(rng) < 0.5f) { + p.wave_type = 1; // saw + } else { + p.duty = rnd(rng, 0.6f); + } + if (rnd01(rng) < 0.5f) { + p.base_freq = 0.2f + rnd(rng, 0.3f); + p.freq_ramp = 0.1f + rnd(rng, 0.4f); + p.repeat_speed = 0.4f + rnd(rng, 0.4f); + } else { + p.base_freq = 0.2f + rnd(rng, 0.3f); + p.freq_ramp = 0.05f + rnd(rng, 0.2f); + if (rnd01(rng) < 0.5f) { + p.vib_strength = rnd(rng, 0.7f); + p.vib_speed = rnd(rng, 0.6f); + } + } + p.env_attack = 0.0f; + p.env_sustain = rnd(rng, 0.4f); + p.env_decay = 0.1f + rnd(rng, 0.4f); + } + else if (name == "hurt" || name == "hit") { + p.wave_type = static_cast(rnd(rng, 3.0f)); + if (p.wave_type == 2) p.wave_type = 3; // prefer noise over sine + if (p.wave_type == 0) p.duty = rnd(rng, 0.6f); + p.base_freq = 0.2f + rnd(rng, 0.6f); + p.freq_ramp = -0.3f - rnd(rng, 0.4f); + p.env_attack = 0.0f; + p.env_sustain = rnd(rng, 0.1f); + p.env_decay = 0.1f + rnd(rng, 0.2f); + if (rnd01(rng) < 0.5f) p.hpf_freq = rnd(rng, 0.3f); + } + else if (name == "jump") { + p.wave_type = 0; // square + p.duty = rnd(rng, 0.6f); + p.base_freq = 0.3f + rnd(rng, 0.3f); + p.freq_ramp = 0.1f + rnd(rng, 0.2f); + p.env_attack = 0.0f; + p.env_sustain = 0.1f + rnd(rng, 0.3f); + p.env_decay = 0.1f + rnd(rng, 0.2f); + if (rnd01(rng) < 0.5f) p.hpf_freq = rnd(rng, 0.3f); + if (rnd01(rng) < 0.5f) p.lpf_freq = 
1.0f - rnd(rng, 0.6f); + } + else if (name == "blip" || name == "select") { + p.wave_type = static_cast(rnd(rng, 2.0f)); + if (p.wave_type == 0) p.duty = rnd(rng, 0.6f); + p.base_freq = 0.2f + rnd(rng, 0.4f); + p.env_attack = 0.0f; + p.env_sustain = 0.1f + rnd(rng, 0.1f); + p.env_decay = rnd(rng, 0.2f); + p.hpf_freq = 0.1f; + } + else { + return false; + } + + return true; +} + +// ============================================================================ +// Mutate +// ============================================================================ + +SfxrParams sfxr_mutate_params(const SfxrParams& base, float amount, std::mt19937& rng) { + SfxrParams p = base; + std::uniform_real_distribution dist(-1.0f, 1.0f); + + auto jitter = [&](float val) -> float { + return std::max(0.0f, std::min(1.0f, val + dist(rng) * amount)); + }; + auto jitterSigned = [&](float val) -> float { + return std::max(-1.0f, std::min(1.0f, val + dist(rng) * amount)); + }; + + p.base_freq = jitter(p.base_freq); + p.freq_ramp = jitterSigned(p.freq_ramp); + p.freq_dramp = jitterSigned(p.freq_dramp); + p.duty = jitter(p.duty); + p.duty_ramp = jitterSigned(p.duty_ramp); + p.vib_strength = jitter(p.vib_strength); + p.vib_speed = jitter(p.vib_speed); + p.env_attack = jitter(p.env_attack); + p.env_sustain = jitter(p.env_sustain); + p.env_decay = jitter(p.env_decay); + p.env_punch = jitter(p.env_punch); + p.lpf_freq = jitter(p.lpf_freq); + p.lpf_ramp = jitterSigned(p.lpf_ramp); + p.lpf_resonance = jitter(p.lpf_resonance); + p.hpf_freq = jitter(p.hpf_freq); + p.hpf_ramp = jitterSigned(p.hpf_ramp); + p.pha_offset = jitterSigned(p.pha_offset); + p.pha_ramp = jitterSigned(p.pha_ramp); + p.repeat_speed = jitter(p.repeat_speed); + p.arp_speed = jitter(p.arp_speed); + p.arp_mod = jitterSigned(p.arp_mod); + + return p; +} + +// ============================================================================ +// Convert params to Python dict +// 
============================================================================ + +PyObject* sfxr_params_to_dict(const SfxrParams& p) { + PyObject* d = PyDict_New(); + if (!d) return NULL; + + PyDict_SetItemString(d, "wave_type", PyLong_FromLong(p.wave_type)); + PyDict_SetItemString(d, "base_freq", PyFloat_FromDouble(p.base_freq)); + PyDict_SetItemString(d, "freq_limit", PyFloat_FromDouble(p.freq_limit)); + PyDict_SetItemString(d, "freq_ramp", PyFloat_FromDouble(p.freq_ramp)); + PyDict_SetItemString(d, "freq_dramp", PyFloat_FromDouble(p.freq_dramp)); + PyDict_SetItemString(d, "duty", PyFloat_FromDouble(p.duty)); + PyDict_SetItemString(d, "duty_ramp", PyFloat_FromDouble(p.duty_ramp)); + PyDict_SetItemString(d, "vib_strength", PyFloat_FromDouble(p.vib_strength)); + PyDict_SetItemString(d, "vib_speed", PyFloat_FromDouble(p.vib_speed)); + PyDict_SetItemString(d, "env_attack", PyFloat_FromDouble(p.env_attack)); + PyDict_SetItemString(d, "env_sustain", PyFloat_FromDouble(p.env_sustain)); + PyDict_SetItemString(d, "env_decay", PyFloat_FromDouble(p.env_decay)); + PyDict_SetItemString(d, "env_punch", PyFloat_FromDouble(p.env_punch)); + PyDict_SetItemString(d, "lpf_freq", PyFloat_FromDouble(p.lpf_freq)); + PyDict_SetItemString(d, "lpf_ramp", PyFloat_FromDouble(p.lpf_ramp)); + PyDict_SetItemString(d, "lpf_resonance", PyFloat_FromDouble(p.lpf_resonance)); + PyDict_SetItemString(d, "hpf_freq", PyFloat_FromDouble(p.hpf_freq)); + PyDict_SetItemString(d, "hpf_ramp", PyFloat_FromDouble(p.hpf_ramp)); + PyDict_SetItemString(d, "pha_offset", PyFloat_FromDouble(p.pha_offset)); + PyDict_SetItemString(d, "pha_ramp", PyFloat_FromDouble(p.pha_ramp)); + PyDict_SetItemString(d, "repeat_speed", PyFloat_FromDouble(p.repeat_speed)); + PyDict_SetItemString(d, "arp_speed", PyFloat_FromDouble(p.arp_speed)); + PyDict_SetItemString(d, "arp_mod", PyFloat_FromDouble(p.arp_mod)); + + return d; +} diff --git a/src/audio/SfxrSynth.h b/src/audio/SfxrSynth.h new file mode 100644 index 0000000..1cff73b --- 
/dev/null +++ b/src/audio/SfxrSynth.h @@ -0,0 +1,54 @@ +#pragma once +#include +#include +#include +#include +#include "Python.h" + +// sfxr parameter set (24 floats + wave_type) +struct SfxrParams { + int wave_type = 0; // 0=square, 1=sawtooth, 2=sine, 3=noise + + float base_freq = 0.3f; // Base frequency + float freq_limit = 0.0f; // Frequency cutoff + float freq_ramp = 0.0f; // Frequency slide + float freq_dramp = 0.0f; // Delta slide + + float duty = 0.5f; // Square wave duty cycle + float duty_ramp = 0.0f; // Duty sweep + + float vib_strength = 0.0f; // Vibrato depth + float vib_speed = 0.0f; // Vibrato speed + + float env_attack = 0.0f; // Envelope attack + float env_sustain = 0.3f; // Envelope sustain + float env_decay = 0.4f; // Envelope decay + float env_punch = 0.0f; // Sustain punch + + float lpf_freq = 1.0f; // Low-pass filter cutoff + float lpf_ramp = 0.0f; // Low-pass filter sweep + float lpf_resonance = 0.0f; // Low-pass filter resonance + + float hpf_freq = 0.0f; // High-pass filter cutoff + float hpf_ramp = 0.0f; // High-pass filter sweep + + float pha_offset = 0.0f; // Phaser offset + float pha_ramp = 0.0f; // Phaser sweep + + float repeat_speed = 0.0f; // Repeat speed + + float arp_speed = 0.0f; // Arpeggiator speed + float arp_mod = 0.0f; // Arpeggiator frequency multiplier +}; + +// Synthesize samples from sfxr parameters (44100 Hz, mono, int16) +std::vector sfxr_synthesize(const SfxrParams& params); + +// Generate preset parameters +bool sfxr_preset(const std::string& name, SfxrParams& out, std::mt19937& rng); + +// Mutate existing parameters +SfxrParams sfxr_mutate_params(const SfxrParams& base, float amount, std::mt19937& rng); + +// Convert params to Python dict +PyObject* sfxr_params_to_dict(const SfxrParams& params); diff --git a/src/platform/HeadlessTypes.h b/src/platform/HeadlessTypes.h index 99f3972..40c066f 100644 --- a/src/platform/HeadlessTypes.h +++ b/src/platform/HeadlessTypes.h @@ -734,15 +734,32 @@ public: // 
============================================================================= class SoundBuffer { + unsigned int sampleRate_ = 44100; + unsigned int channelCount_ = 1; + std::size_t sampleCount_ = 0; public: SoundBuffer() = default; // In headless mode, pretend sound loading succeeded bool loadFromFile(const std::string& filename) { return true; } bool loadFromMemory(const void* data, size_t sizeInBytes) { return true; } - Time getDuration() const { return Time(); } + bool loadFromSamples(const Int16* samples, Uint64 sampleCount, unsigned int channelCount, unsigned int sampleRate) { + sampleCount_ = sampleCount; + channelCount_ = channelCount; + sampleRate_ = sampleRate; + return true; + } + Time getDuration() const { + if (sampleRate_ == 0 || channelCount_ == 0) return Time(); + float secs = static_cast(sampleCount_) / static_cast(channelCount_) / static_cast(sampleRate_); + return seconds(secs); + } + unsigned int getSampleRate() const { return sampleRate_; } + unsigned int getChannelCount() const { return channelCount_; } + Uint64 getSampleCount() const { return sampleCount_; } }; class Sound { + float pitch_ = 1.0f; public: enum Status { Stopped, Paused, Playing }; @@ -759,6 +776,8 @@ public: float getVolume() const { return 100.0f; } void setLoop(bool loop) {} bool getLoop() const { return false; } + void setPitch(float pitch) { pitch_ = pitch; } + float getPitch() const { return pitch_; } }; class Music { diff --git a/src/platform/SDL2Types.h b/src/platform/SDL2Types.h index 4e59d81..4dd94de 100644 --- a/src/platform/SDL2Types.h +++ b/src/platform/SDL2Types.h @@ -983,8 +983,52 @@ public: return true; } + bool loadFromSamples(const Int16* samples, Uint64 sampleCount, unsigned int channelCount, unsigned int sampleRate) { + if (chunk_) { Mix_FreeChunk(chunk_); chunk_ = nullptr; } + // Build a WAV file in memory: 44-byte header + PCM data + uint32_t dataSize = static_cast(sampleCount * sizeof(Int16)); + uint32_t fileSize = 44 + dataSize; + std::vector 
wav(fileSize); + uint8_t* p = wav.data(); + // RIFF header + memcpy(p, "RIFF", 4); p += 4; + uint32_t chunkSize = fileSize - 8; + memcpy(p, &chunkSize, 4); p += 4; + memcpy(p, "WAVE", 4); p += 4; + // fmt sub-chunk + memcpy(p, "fmt ", 4); p += 4; + uint32_t fmtSize = 16; + memcpy(p, &fmtSize, 4); p += 4; + uint16_t audioFormat = 1; // PCM + memcpy(p, &audioFormat, 2); p += 2; + uint16_t numChannels = static_cast(channelCount); + memcpy(p, &numChannels, 2); p += 2; + uint32_t sr = sampleRate; + memcpy(p, &sr, 4); p += 4; + uint32_t byteRate = sampleRate * channelCount * 2; + memcpy(p, &byteRate, 4); p += 4; + uint16_t blockAlign = static_cast(channelCount * 2); + memcpy(p, &blockAlign, 2); p += 2; + uint16_t bitsPerSample = 16; + memcpy(p, &bitsPerSample, 2); p += 2; + // data sub-chunk + memcpy(p, "data", 4); p += 4; + memcpy(p, &dataSize, 4); p += 4; + memcpy(p, samples, dataSize); + // Load via SDL_mixer + SDL_RWops* rw = SDL_RWFromConstMem(wav.data(), static_cast(fileSize)); + if (!rw) return false; + chunk_ = Mix_LoadWAV_RW(rw, 1); + if (!chunk_) return false; + computeDuration(); + return true; + } + Time getDuration() const { return duration_; } Mix_Chunk* getChunk() const { return chunk_; } + unsigned int getSampleRate() const { return 44100; } // SDL_mixer default + unsigned int getChannelCount() const { return 1; } // Approximate + Uint64 getSampleCount() const { return chunk_ ? 
chunk_->alen / 2 : 0; } private: void computeDuration() { @@ -1106,6 +1150,10 @@ public: void setLoop(bool loop) { loop_ = loop; } bool getLoop() const { return loop_; } + // Pitch: SDL_mixer doesn't support per-channel pitch, so store value only + void setPitch(float pitch) { pitch_ = pitch; } + float getPitch() const { return pitch_; } + // Called by Mix_ChannelFinished callback static void onChannelFinished(int channel) { if (channel >= 0 && channel < 16 && g_channelOwners[channel]) { @@ -1118,6 +1166,7 @@ private: Mix_Chunk* chunk_ = nullptr; // Borrowed from SoundBuffer int channel_ = -1; float volume_ = 100.f; + float pitch_ = 1.0f; bool loop_ = false; }; diff --git a/stubs/mcrfpy/__init__.pyi b/stubs/mcrfpy/__init__.pyi index 5e7b5ac..d080723 100644 --- a/stubs/mcrfpy/__init__.pyi +++ b/stubs/mcrfpy/__init__.pyi @@ -1,213 +1,1413 @@ """Type stubs for McRogueFace Python API. -Auto-generated - do not edit directly. +Core game engine interface for creating roguelike games with Python. """ -from typing import Any, List, Dict, Tuple, Optional, Callable, Union +from typing import Any, List, Dict, Tuple, Optional, Callable, Union, overload -# Module documentation -# McRogueFace Python API -# -# Core game engine interface for creating roguelike games with Python. +# Type aliases - Color tuples are accepted anywhere a Color is expected +ColorLike = Union['Color', Tuple[int, int, int], Tuple[int, int, int, int]] +UIElement = Union['Frame', 'Caption', 'Sprite', 'Grid', 'Line', 'Circle', 'Arc'] +Transition = Union[str, None] + +# Enums + +class Key: + """Keyboard key codes enum. + + All standard keyboard keys are available as class attributes: + A-Z, Num0-Num9, F1-F15, Arrow keys, modifiers, etc. 
+ """ + A: 'Key' + B: 'Key' + C: 'Key' + D: 'Key' + E: 'Key' + F: 'Key' + G: 'Key' + H: 'Key' + I: 'Key' + J: 'Key' + K: 'Key' + L: 'Key' + M: 'Key' + N: 'Key' + O: 'Key' + P: 'Key' + Q: 'Key' + R: 'Key' + S: 'Key' + T: 'Key' + U: 'Key' + V: 'Key' + W: 'Key' + X: 'Key' + Y: 'Key' + Z: 'Key' + Num0: 'Key' + Num1: 'Key' + Num2: 'Key' + Num3: 'Key' + Num4: 'Key' + Num5: 'Key' + Num6: 'Key' + Num7: 'Key' + Num8: 'Key' + Num9: 'Key' + ESCAPE: 'Key' + ENTER: 'Key' + SPACE: 'Key' + TAB: 'Key' + BACKSPACE: 'Key' + DELETE: 'Key' + UP: 'Key' + DOWN: 'Key' + LEFT: 'Key' + RIGHT: 'Key' + LSHIFT: 'Key' + RSHIFT: 'Key' + LCTRL: 'Key' + RCTRL: 'Key' + LALT: 'Key' + RALT: 'Key' + F1: 'Key' + F2: 'Key' + F3: 'Key' + F4: 'Key' + F5: 'Key' + F6: 'Key' + F7: 'Key' + F8: 'Key' + F9: 'Key' + F10: 'Key' + F11: 'Key' + F12: 'Key' + F13: 'Key' + F14: 'Key' + F15: 'Key' + HOME: 'Key' + END: 'Key' + PAGEUP: 'Key' + PAGEDOWN: 'Key' + INSERT: 'Key' + PAUSE: 'Key' + TILDE: 'Key' + MINUS: 'Key' + EQUAL: 'Key' + LBRACKET: 'Key' + RBRACKET: 'Key' + BACKSLASH: 'Key' + SEMICOLON: 'Key' + QUOTE: 'Key' + COMMA: 'Key' + PERIOD: 'Key' + SLASH: 'Key' + # NUM_ aliases for Num keys + NUM_0: 'Key' + NUM_1: 'Key' + NUM_2: 'Key' + NUM_3: 'Key' + NUM_4: 'Key' + NUM_5: 'Key' + NUM_6: 'Key' + NUM_7: 'Key' + NUM_8: 'Key' + NUM_9: 'Key' + NUMPAD0: 'Key' + NUMPAD1: 'Key' + NUMPAD2: 'Key' + NUMPAD3: 'Key' + NUMPAD4: 'Key' + NUMPAD5: 'Key' + NUMPAD6: 'Key' + NUMPAD7: 'Key' + NUMPAD8: 'Key' + NUMPAD9: 'Key' + +class MouseButton: + """Mouse button enum for click callbacks.""" + LEFT: 'MouseButton' + RIGHT: 'MouseButton' + MIDDLE: 'MouseButton' + +class InputState: + """Input action state enum for callbacks.""" + PRESSED: 'InputState' + RELEASED: 'InputState' + +class Easing: + """Animation easing function enum. + + Available easing functions for smooth animations. 
+ """ + LINEAR: 'Easing' + EASE_IN: 'Easing' + EASE_OUT: 'Easing' + EASE_IN_OUT: 'Easing' + EASE_IN_QUAD: 'Easing' + EASE_OUT_QUAD: 'Easing' + EASE_IN_OUT_QUAD: 'Easing' + EASE_IN_CUBIC: 'Easing' + EASE_OUT_CUBIC: 'Easing' + EASE_IN_OUT_CUBIC: 'Easing' + EASE_IN_QUART: 'Easing' + EASE_OUT_QUART: 'Easing' + EASE_IN_OUT_QUART: 'Easing' + EASE_IN_SINE: 'Easing' + EASE_OUT_SINE: 'Easing' + EASE_IN_OUT_SINE: 'Easing' + EASE_IN_EXPO: 'Easing' + EASE_OUT_EXPO: 'Easing' + EASE_IN_OUT_EXPO: 'Easing' + EASE_IN_CIRC: 'Easing' + EASE_OUT_CIRC: 'Easing' + EASE_IN_OUT_CIRC: 'Easing' + EASE_IN_ELASTIC: 'Easing' + EASE_OUT_ELASTIC: 'Easing' + EASE_IN_OUT_ELASTIC: 'Easing' + EASE_IN_BACK: 'Easing' + EASE_OUT_BACK: 'Easing' + EASE_IN_OUT_BACK: 'Easing' + EASE_IN_BOUNCE: 'Easing' + EASE_OUT_BOUNCE: 'Easing' + EASE_IN_OUT_BOUNCE: 'Easing' # Classes -class Animation: - """Animation object for animating UI properties""" - def __init__(selftype(self)) -> None: ... - - def complete(self) -> None: ... - def get_current_value(self) -> Any: ... - def hasValidTarget(self) -> bool: ... - def start(selftarget: UIDrawable) -> None: ... - def update(selfdelta_time: float) -> bool: ... - -class Caption: - """Caption(pos=None, font=None, text='', **kwargs)""" - def __init__(selftype(self)) -> None: ... - - def get_bounds(self) -> tuple: ... - def move(selfdx: float, dy: float) -> None: ... - def resize(selfwidth: float, height: float) -> None: ... - class Color: - """SFML Color Object""" - def __init__(selftype(self)) -> None: ... + """SFML Color Object for RGBA colors.""" - def from_hex(selfhex_string: str) -> Color: ... - def lerp(selfother: Color, t: float) -> Color: ... - def to_hex(self) -> str: ... + r: int + g: int + b: int + a: int -class Drawable: - """Base class for all drawable UI elements""" - def __init__(selftype(self)) -> None: ... + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, r: int, g: int, b: int, a: int = 255) -> None: ... 
- def get_bounds(self) -> tuple: ... - def move(selfdx: float, dy: float) -> None: ... - def resize(selfwidth: float, height: float) -> None: ... + def from_hex(self, hex_string: str) -> 'Color': + """Create color from hex string (e.g., '#FF0000' or 'FF0000').""" + ... -class Entity: - """Entity(grid_pos=None, texture=None, sprite_index=0, **kwargs)""" - def __init__(selftype(self)) -> None: ... + def to_hex(self) -> str: + """Convert color to hex string format.""" + ... - def at(self, *args, **kwargs) -> Any: ... - def die(self, *args, **kwargs) -> Any: ... - def get_bounds(self) -> tuple: ... - def index(self, *args, **kwargs) -> Any: ... - def move(selfdx: float, dy: float) -> None: ... - def path_to(selfx: int, y: int) -> bool: ... - def resize(selfwidth: float, height: float) -> None: ... - def update_visibility(self) -> None: ... - -class EntityCollection: - """Iterable, indexable collection of Entities""" - def __init__(selftype(self)) -> None: ... - - def append(self, *args, **kwargs) -> Any: ... - def count(self, *args, **kwargs) -> Any: ... - def extend(self, *args, **kwargs) -> Any: ... - def index(self, *args, **kwargs) -> Any: ... - def remove(self, *args, **kwargs) -> Any: ... - -class Font: - """SFML Font Object""" - def __init__(selftype(self)) -> None: ... - -class Frame: - """Frame(pos=None, size=None, **kwargs)""" - def __init__(selftype(self)) -> None: ... - - def get_bounds(self) -> tuple: ... - def move(selfdx: float, dy: float) -> None: ... - def resize(selfwidth: float, height: float) -> None: ... - -class Grid: - """Grid(pos=None, size=None, grid_size=None, texture=None, **kwargs)""" - def __init__(selftype(self)) -> None: ... - - def at(self, *args, **kwargs) -> Any: ... - def compute_astar_path(selfx1: int, y1: int, x2: int, y2: int, diagonal_cost: float = 1.41) -> List[Tuple[int, int]]: ... - def compute_dijkstra(selfroot_x: int, root_y: int, diagonal_cost: float = 1.41) -> None: ... 
- def compute_fov(selfx: int, y: int, radius: int = 0, light_walls: bool = True, algorithm: int = FOV_BASIC) -> List[Tuple[int, int, bool, bool]]: ... - def find_path(selfx1: int, y1: int, x2: int, y2: int, diagonal_cost: float = 1.41) -> List[Tuple[int, int]]: ... - def get_bounds(self) -> tuple: ... - def get_dijkstra_distance(selfx: int, y: int) -> Optional[float]: ... - def get_dijkstra_path(selfx: int, y: int) -> List[Tuple[int, int]]: ... - def is_in_fov(selfx: int, y: int) -> bool: ... - def move(selfdx: float, dy: float) -> None: ... - def resize(selfwidth: float, height: float) -> None: ... - -class GridPoint: - """UIGridPoint object""" - def __init__(selftype(self)) -> None: ... - -class GridPointState: - """UIGridPointState object""" - def __init__(selftype(self)) -> None: ... - -class Scene: - """Base class for object-oriented scenes""" - def __init__(selftype(self)) -> None: ... - - def activate(self) -> None: ... - def get_ui(self) -> UICollection: ... - def register_keyboard(selfcallback: callable) -> None: ... - -class Sprite: - """Sprite(pos=None, texture=None, sprite_index=0, **kwargs)""" - def __init__(selftype(self)) -> None: ... - - def get_bounds(self) -> tuple: ... - def move(selfdx: float, dy: float) -> None: ... - def resize(selfwidth: float, height: float) -> None: ... - -class Texture: - """SFML Texture Object""" - def __init__(selftype(self)) -> None: ... - -class Timer: - """Timer(name, callback, interval, once=False)""" - def __init__(selftype(self)) -> None: ... - - def cancel(self) -> None: ... - def pause(self) -> None: ... - def restart(self) -> None: ... - def resume(self) -> None: ... - -class UICollection: - """Iterable, indexable collection of UI objects""" - def __init__(selftype(self)) -> None: ... - - def append(self, *args, **kwargs) -> Any: ... - def count(self, *args, **kwargs) -> Any: ... - def extend(self, *args, **kwargs) -> Any: ... - def index(self, *args, **kwargs) -> Any: ... 
- def remove(self, *args, **kwargs) -> Any: ... - -class UICollectionIter: - """Iterator for a collection of UI objects""" - def __init__(selftype(self)) -> None: ... - -class UIEntityCollectionIter: - """Iterator for a collection of UI objects""" - def __init__(selftype(self)) -> None: ... + def lerp(self, other: 'Color', t: float) -> 'Color': + """Linear interpolation between two colors.""" + ... class Vector: - """SFML Vector Object""" - def __init__(selftype(self)) -> None: ... + """SFML Vector Object for 2D coordinates.""" - def angle(self) -> float: ... - def copy(self) -> Vector: ... - def distance_to(selfother: Vector) -> float: ... - def dot(selfother: Vector) -> float: ... - def magnitude(self) -> float: ... - def magnitude_squared(self) -> float: ... - def normalize(self) -> Vector: ... + x: float + y: float + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, x: float, y: float) -> None: ... + + def add(self, other: 'Vector') -> 'Vector': ... + def subtract(self, other: 'Vector') -> 'Vector': ... + def multiply(self, scalar: float) -> 'Vector': ... + def divide(self, scalar: float) -> 'Vector': ... + def distance(self, other: 'Vector') -> float: ... + def normalize(self) -> 'Vector': ... + def dot(self, other: 'Vector') -> float: ... + +class Texture: + """SFML Texture Object for sprite sheet images.""" + + def __init__(self, filename: str, sprite_width: int = 0, sprite_height: int = 0) -> None: ... + + filename: str + width: int + height: int + sprite_count: int + + @classmethod + def from_bytes(cls, data: bytes, width: int, height: int, + sprite_width: int, sprite_height: int, + name: str = "") -> 'Texture': + """Create texture from raw RGBA bytes.""" + ... + + @classmethod + def composite(cls, textures: List['Texture'], + sprite_width: int, sprite_height: int, + name: str = "") -> 'Texture': + """Composite multiple textures into one by layering sprites.""" + ... 
+ + def hsl_shift(self, hue_shift: float, sat_shift: float = 0.0, + lit_shift: float = 0.0) -> 'Texture': + """Return a new texture with HSL color shift applied.""" + ... + +class Font: + """SFML Font Object for text rendering.""" + + def __init__(self, filename: str) -> None: ... + + filename: str + family: str + +class SoundBuffer: + """In-memory audio buffer for procedural audio generation and DSP effects. + + Supports tone synthesis, sfxr-style retro sound effects, DSP processing, + and composition. Effects return new SoundBuffer instances (copy-modify pattern). + """ + + def __init__(self, filename: str) -> None: + """Load audio from a file into memory.""" + ... + + @property + def duration(self) -> float: + """Duration in seconds (read-only).""" + ... + + @property + def sample_count(self) -> int: + """Number of samples (read-only).""" + ... + + @property + def sample_rate(self) -> int: + """Sample rate in Hz (read-only).""" + ... + + @property + def channels(self) -> int: + """Number of audio channels (read-only).""" + ... + + @property + def sfxr_params(self) -> Optional[Dict[str, Any]]: + """sfxr synthesis parameters, or None if not sfxr-generated (read-only).""" + ... + + # --- Factory class methods --- + + @classmethod + def from_samples(cls, data: bytes, channels: int, sample_rate: int) -> 'SoundBuffer': + """Create SoundBuffer from raw int16 PCM sample data.""" + ... + + @overload + @classmethod + def tone(cls, frequency: float, duration: float, waveform: str) -> 'SoundBuffer': ... + + @overload + @classmethod + def tone(cls, frequency: float, duration: float, waveform: str, *, + attack: float = ..., decay: float = ..., + sustain: float = ..., release: float = ..., + sample_rate: int = ...) -> 'SoundBuffer': ... + + @classmethod + def tone(cls, frequency: float, duration: float, waveform: str, **kwargs: Any) -> 'SoundBuffer': + """Generate a tone with specified waveform and optional ADSR envelope. + + Args: + frequency: Frequency in Hz. 
+ duration: Duration in seconds. + waveform: One of "sine", "square", "saw", "triangle", "noise". + attack: ADSR attack time in seconds. + decay: ADSR decay time in seconds. + sustain: ADSR sustain level (0.0-1.0). + release: ADSR release time in seconds. + sample_rate: Sample rate (default 44100). + """ + ... + + @overload + @classmethod + def sfxr(cls, preset: str, *, seed: Optional[int] = ...) -> 'SoundBuffer': ... + + @overload + @classmethod + def sfxr(cls, *, wave_type: int = ..., base_freq: float = ..., + freq_limit: float = ..., freq_ramp: float = ..., + freq_dramp: float = ..., duty: float = ..., + duty_ramp: float = ..., vib_strength: float = ..., + vib_speed: float = ..., env_attack: float = ..., + env_sustain: float = ..., env_decay: float = ..., + env_punch: float = ..., lpf_freq: float = ..., + lpf_ramp: float = ..., lpf_resonance: float = ..., + hpf_freq: float = ..., hpf_ramp: float = ..., + pha_offset: float = ..., pha_ramp: float = ..., + repeat_speed: float = ..., arp_speed: float = ..., + arp_mod: float = ..., seed: Optional[int] = ...) -> 'SoundBuffer': ... + + @classmethod + def sfxr(cls, *args: Any, **kwargs: Any) -> 'SoundBuffer': + """Generate sfxr-style retro sound effect. + + Can be called with a preset name ("coin", "laser", "explosion", + "powerup", "hurt", "jump", "blip") or with custom sfxr parameters. + + Args: + preset: Name of sfxr preset. + seed: Random seed for deterministic generation. + **kwargs: Individual sfxr parameters (wave_type, base_freq, etc.) + """ + ... + + @classmethod + def concat(cls, buffers: List['SoundBuffer'], overlap: float = 0.0) -> 'SoundBuffer': + """Concatenate multiple SoundBuffers with optional crossfade overlap.""" + ... + + @classmethod + def mix(cls, buffers: List['SoundBuffer']) -> 'SoundBuffer': + """Mix (add) multiple SoundBuffers together, padding to longest.""" + ... 
+ + # --- DSP effects (each returns a new SoundBuffer) --- + + def pitch_shift(self, factor: float) -> 'SoundBuffer': + """Resample to shift pitch. Factor > 1.0 = higher pitch, shorter duration.""" + ... + + def low_pass(self, cutoff_hz: float) -> 'SoundBuffer': + """Apply single-pole IIR low-pass filter.""" + ... + + def high_pass(self, cutoff_hz: float) -> 'SoundBuffer': + """Apply high-pass filter (complement of low-pass).""" + ... + + def echo(self, delay_ms: float, feedback: float, wet: float) -> 'SoundBuffer': + """Apply echo effect with delay, feedback, and wet/dry mix.""" + ... + + def reverb(self, room_size: float, damping: float, wet: float) -> 'SoundBuffer': + """Apply simplified Freeverb-style reverb.""" + ... + + def distortion(self, drive: float) -> 'SoundBuffer': + """Apply tanh soft-clipping distortion.""" + ... + + def bit_crush(self, bits: int, rate_divisor: int) -> 'SoundBuffer': + """Apply bit depth reduction and sample rate crushing.""" + ... + + def normalize(self) -> 'SoundBuffer': + """Scale samples to 95% of maximum amplitude.""" + ... + + def reverse(self) -> 'SoundBuffer': + """Reverse sample order.""" + ... + + def slice(self, start: float, end: float) -> 'SoundBuffer': + """Extract a time range (in seconds) as a new SoundBuffer.""" + ... + + def sfxr_mutate(self, amount: float, seed: Optional[int] = None) -> 'SoundBuffer': + """Jitter sfxr parameters by +/- amount and re-synthesize. Only works on sfxr-generated buffers.""" + ... + + +class Sound: + """Sound effect object for short audio clips. + + Can be created from a filename string or a SoundBuffer object. + """ + + @overload + def __init__(self, filename: str) -> None: ... + @overload + def __init__(self, buffer: SoundBuffer) -> None: ... + def __init__(self, source: Union[str, SoundBuffer]) -> None: ... 
+ + volume: float + loop: bool + playing: bool # Read-only + duration: float # Read-only + source: str # Read-only + pitch: float + + @property + def buffer(self) -> Optional[SoundBuffer]: + """The SoundBuffer, or None for file-loaded sounds (read-only).""" + ... + + def play(self) -> None: + """Play the sound effect.""" + ... + + def pause(self) -> None: + """Pause playback.""" + ... + + def stop(self) -> None: + """Stop playback.""" + ... + + def play_varied(self, *, pitch_range: float = 0.1, volume_range: float = 3.0) -> None: + """Play with randomized pitch and volume variation.""" + ... + +class Music: + """Streaming music object for longer audio tracks.""" + + def __init__(self, filename: str) -> None: ... + + volume: float + loop: bool + playing: bool # Read-only + duration: float # Read-only + position: float # Playback position in seconds + source: str # Read-only + + def play(self) -> None: + """Play the music.""" + ... + + def pause(self) -> None: + """Pause playback.""" + ... + + def stop(self) -> None: + """Stop playback.""" + ... + +class Drawable: + """Base class for all drawable UI elements.""" + + x: float + y: float + visible: bool + z_index: int + name: str + pos: Vector + opacity: float + + # Mouse event callbacks (#140, #141, #230) + # on_click receives (pos: Vector, button: MouseButton, action: InputState) + on_click: Optional[Callable[['Vector', 'MouseButton', 'InputState'], None]] + # Hover callbacks receive only position per #230 + on_enter: Optional[Callable[['Vector'], None]] + on_exit: Optional[Callable[['Vector'], None]] + on_move: Optional[Callable[['Vector'], None]] + + # Read-only hover state (#140) + hovered: bool + + def get_bounds(self) -> Tuple[float, float, float, float]: + """Get bounding box as (x, y, width, height).""" + ... + + def move(self, dx: float, dy: float) -> None: + """Move by relative offset (dx, dy).""" + ... 
+ + def resize(self, width: float, height: float) -> None: + """Resize to new dimensions (width, height).""" + ... + + def animate(self, property: str, end_value: Any, duration: float, + easing: Union[str, 'Easing'] = 'linear', + callback: Optional[Callable[[Any, str, Any], None]] = None) -> None: + """Animate a property to a target value over duration seconds.""" + ... + +class Frame(Drawable): + """Frame(x=0, y=0, w=0, h=0, fill_color=None, outline_color=None, outline=0, on_click=None, children=None) + + A rectangular frame UI element that can contain other drawable elements. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, x: float = 0, y: float = 0, w: float = 0, h: float = 0, + fill_color: Optional[ColorLike] = None, outline_color: Optional[ColorLike] = None, + outline: float = 0, on_click: Optional[Callable] = None, + children: Optional[List[UIElement]] = None) -> None: ... + @overload + def __init__(self, pos: Tuple[float, float], size: Tuple[float, float], + fill_color: Optional[ColorLike] = None, outline_color: Optional[ColorLike] = None, + outline: float = 0, on_click: Optional[Callable] = None, + children: Optional[List[UIElement]] = None) -> None: ... + + w: float + h: float + fill_color: ColorLike + outline_color: ColorLike + outline: float + children: 'UICollection' + clip_children: bool + +class Caption(Drawable): + """Caption(pos=None, font=None, text='', fill_color=None, ...) + + A text display UI element with customizable font and styling. + Positional args: pos, font, text. Everything else is keyword-only. + """ + + @overload + def __init__(self) -> None: ... 
+ @overload + def __init__(self, pos: Optional[Tuple[float, float]] = None, + font: Optional[Font] = None, text: str = '', + fill_color: Optional[ColorLike] = None, + outline_color: Optional[ColorLike] = None, outline: float = 0, + font_size: float = 16.0, on_click: Optional[Callable] = None, + visible: bool = True, opacity: float = 1.0, + z_index: int = 0, name: str = '', + x: float = 0, y: float = 0) -> None: ... + + text: str + font: Font + fill_color: ColorLike + outline_color: ColorLike + outline: float + font_size: float + w: float # Read-only, computed from text + h: float # Read-only, computed from text + +class Sprite(Drawable): + """Sprite(pos=None, texture=None, sprite_index=0, scale=1.0, on_click=None) + + A sprite UI element that displays a texture or portion of a texture atlas. + Positional args: pos, texture, sprite_index. Everything else is keyword-only. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, pos: Optional[Tuple[float, float]] = None, + texture: Optional[Texture] = None, + sprite_index: int = 0, scale: float = 1.0, + on_click: Optional[Callable] = None, + visible: bool = True, opacity: float = 1.0, + z_index: int = 0, name: str = '', + x: float = 0, y: float = 0) -> None: ... + + texture: Texture + sprite_index: int + sprite_number: int # Deprecated alias for sprite_index + scale: float + w: float # Read-only, computed from texture + h: float # Read-only, computed from texture + +class Grid(Drawable): + """Grid(pos=(0,0), size=(0,0), grid_size=(2,2), texture=None, ...) + + A grid-based tilemap UI element for rendering tile-based levels and game worlds. + Supports layers, FOV, pathfinding, and entity management. + """ + + @overload + def __init__(self) -> None: ... 
+ @overload + def __init__(self, pos: Tuple[float, float] = (0, 0), + size: Tuple[float, float] = (0, 0), + grid_size: Tuple[int, int] = (2, 2), + texture: Optional[Texture] = None, + fill_color: Optional[ColorLike] = None, + on_click: Optional[Callable] = None, + center_x: float = 0, center_y: float = 0, zoom: float = 1.0, + visible: bool = True, opacity: float = 1.0, + z_index: int = 0, name: str = '', + layers: Optional[List[Union['ColorLayer', 'TileLayer']]] = None) -> None: ... + + # Dimensions + grid_size: Vector # Read-only - has .x (width) and .y (height) + grid_w: int # Read-only + grid_h: int # Read-only + + # Position and size + position: Tuple[float, float] + size: Vector + w: float + h: float + + # Camera/viewport + center: Union[Vector, Tuple[float, float]] # Viewport center point (pixel coordinates) + center_x: float + center_y: float + zoom: float # Scale factor for rendering + + # Collections + entities: 'EntityCollection' # Entities on this grid + children: 'UICollection' # UI overlays (speech bubbles, effects) + layers: Tuple[Union['ColorLayer', 'TileLayer'], ...] # Grid layers sorted by z_index + + # Appearance + texture: Texture # Read-only + fill_color: ColorLike # Background fill color + + # Perspective/FOV + perspective: Optional['Entity'] # Entity for FOV rendering (None = omniscient) + perspective_enabled: bool # Whether to use perspective-based FOV + fov: 'FOV' # FOV algorithm enum + fov_radius: int # Default FOV radius + + # Cell-level mouse events (#230) + on_cell_click: Optional[Callable[['Vector', 'MouseButton', 'InputState'], None]] + on_cell_enter: Optional[Callable[['Vector'], None]] + on_cell_exit: Optional[Callable[['Vector'], None]] + hovered_cell: Optional[Tuple[int, int]] # Read-only + + def at(self, x: int, y: int) -> 'GridPoint': + """Get grid point at tile coordinates.""" + ... + + def center_camera(self, pos: Optional[Tuple[float, float]] = None) -> None: + """Center the camera on a tile coordinate.""" + ... 
+ + # FOV methods + def compute_fov(self, pos: Tuple[int, int], radius: int = 0, + light_walls: bool = True, algorithm: Optional['FOV'] = None) -> None: + """Compute field of view from a position.""" + ... + + def is_in_fov(self, pos: Tuple[int, int]) -> bool: + """Check if a cell is in the field of view.""" + ... + + # Pathfinding methods + def find_path(self, start: Union[Tuple[int, int], 'Vector', 'Entity'], + end: Union[Tuple[int, int], 'Vector', 'Entity'], + diagonal_cost: float = 1.41) -> Optional['AStarPath']: + """Compute A* path between two points.""" + ... + + def get_dijkstra_map(self, root: Union[Tuple[int, int], 'Vector', 'Entity'], + diagonal_cost: float = 1.41) -> 'DijkstraMap': + """Get or create a Dijkstra distance map for a root position.""" + ... + + def clear_dijkstra_maps(self) -> None: + """Clear all cached Dijkstra maps.""" + ... + + # Layer methods + def add_layer(self, layer: Union['ColorLayer', 'TileLayer']) -> None: + """Add a pre-constructed layer to the grid.""" + ... + + def remove_layer(self, layer: Union['ColorLayer', 'TileLayer']) -> None: + """Remove a layer from the grid.""" + ... + + def layer(self, name: str) -> Optional[Union['ColorLayer', 'TileLayer']]: + """Get layer by name.""" + ... + + # Spatial queries + def entities_in_radius(self, pos: Union[Tuple[float, float], 'Vector'], + radius: float) -> List['Entity']: + """Query entities within radius using spatial hash.""" + ... + + # HeightMap application + def apply_threshold(self, source: 'HeightMap', range: Tuple[float, float], + walkable: Optional[bool] = None, + transparent: Optional[bool] = None) -> 'Grid': + """Apply walkable/transparent properties where heightmap values are in range.""" + ... + + def apply_ranges(self, source: 'HeightMap', + ranges: List[Tuple[Tuple[float, float], Dict[str, bool]]]) -> 'Grid': + """Apply multiple thresholds in a single pass.""" + ... 
+ +class Line(Drawable): + """Line(start=None, end=None, thickness=1.0, color=None, on_click=None, **kwargs) + + A line UI element for drawing straight lines between two points. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, start: Optional[Tuple[float, float]] = None, + end: Optional[Tuple[float, float]] = None, + thickness: float = 1.0, color: Optional[ColorLike] = None, + on_click: Optional[Callable] = None) -> None: ... + + start: Vector + end: Vector + thickness: float + color: ColorLike + +class Circle(Drawable): + """Circle(radius=0, center=None, fill_color=None, outline_color=None, outline=0, on_click=None, **kwargs) + + A circle UI element for drawing filled or outlined circles. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, radius: float = 0, center: Optional[Tuple[float, float]] = None, + fill_color: Optional[ColorLike] = None, outline_color: Optional[ColorLike] = None, + outline: float = 0, on_click: Optional[Callable] = None) -> None: ... + + radius: float + center: Vector + fill_color: ColorLike + outline_color: ColorLike + outline: float + +class Arc(Drawable): + """Arc(center=None, radius=0, start_angle=0, end_angle=90, color=None, thickness=1, on_click=None, **kwargs) + + An arc UI element for drawing curved line segments. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, center: Optional[Tuple[float, float]] = None, radius: float = 0, + start_angle: float = 0, end_angle: float = 90, + color: Optional[ColorLike] = None, thickness: float = 1.0, + on_click: Optional[Callable] = None) -> None: ... + + center: Vector + radius: float + start_angle: float + end_angle: float + color: ColorLike + thickness: float + +class GridPoint: + """Grid point representing a single tile's properties. + + Accessed via Grid.at(x, y). Controls walkability and transparency + for pathfinding and FOV calculations. 
+ """ + + walkable: bool # Whether entities can walk through this cell + transparent: bool # Whether light/sight passes through this cell + tilesprite: int # Tile sprite index (legacy property, not in dir()) + entities: List['Entity'] # Read-only list of entities at this cell + grid_pos: Tuple[int, int] # Read-only (x, y) position in grid + +class GridPointState: + """Per-entity visibility state for a grid cell. + + Tracks what an entity has seen/discovered. Accessed via entity perspective system. + """ + + visible: bool # Currently visible in FOV + discovered: bool # Has been seen at least once + point: Optional['GridPoint'] # The GridPoint at this position (None if not discovered) + +class ColorLayer: + """A color overlay layer for Grid. + + Provides per-cell color values for tinting, fog of war, etc. + Construct independently, then add to a Grid via grid.add_layer(). + """ + + def __init__(self, z_index: int = -1, name: Optional[str] = None, + grid_size: Optional[Tuple[int, int]] = None) -> None: ... + + z_index: int + name: str # Read-only + visible: bool + grid_size: Vector # Read-only + grid: Optional['Grid'] + + def fill(self, color: ColorLike) -> None: + """Fill entire layer with a single color.""" + ... + + def set(self, pos: Tuple[int, int], color: ColorLike) -> None: + """Set color at a specific cell.""" + ... + + def get(self, pos: Tuple[int, int]) -> Color: + """Get color at a specific cell.""" + ... + +class TileLayer: + """A tile sprite layer for Grid. + + Provides per-cell tile indices for multi-layer tile rendering. + Construct independently, then add to a Grid via grid.add_layer(). + """ + + def __init__(self, z_index: int = -1, name: Optional[str] = None, + texture: Optional[Texture] = None, + grid_size: Optional[Tuple[int, int]] = None) -> None: ... 
+ + z_index: int + name: str # Read-only + visible: bool + texture: Optional[Texture] + grid_size: Vector # Read-only + grid: Optional['Grid'] + + def fill(self, tile_index: int) -> None: + """Fill entire layer with a single tile index.""" + ... + + def set(self, pos: Tuple[int, int], tile_index: int) -> None: + """Set tile index at a specific cell. Use -1 for transparent.""" + ... + + def get(self, pos: Tuple[int, int]) -> int: + """Get tile index at a specific cell.""" + ... + +class FOV: + """Field of view algorithm enum. + + Available algorithms: + - FOV.BASIC: Simple raycasting + - FOV.DIAMOND: Diamond-shaped FOV + - FOV.SHADOW: Shadow casting (recommended) + - FOV.PERMISSIVE_0 through FOV.PERMISSIVE_8: Permissive algorithms + - FOV.RESTRICTIVE: Restrictive precise angle shadowcasting + """ + + BASIC: 'FOV' + DIAMOND: 'FOV' + SHADOW: 'FOV' + PERMISSIVE_0: 'FOV' + PERMISSIVE_1: 'FOV' + PERMISSIVE_2: 'FOV' + PERMISSIVE_3: 'FOV' + PERMISSIVE_4: 'FOV' + PERMISSIVE_5: 'FOV' + PERMISSIVE_6: 'FOV' + PERMISSIVE_7: 'FOV' + PERMISSIVE_8: 'FOV' + RESTRICTIVE: 'FOV' + +class AStarPath: + """A* pathfinding result. + + Returned by Grid.find_path(). Can be iterated or walked step-by-step. + """ + + def __iter__(self) -> Any: ... + def __len__(self) -> int: ... + + def walk(self) -> Optional[Tuple[int, int]]: + """Get next step in path, or None if complete.""" + ... + + def reverse(self) -> 'AStarPath': + """Return a reversed copy of the path.""" + ... + +class DijkstraMap: + """Dijkstra distance map for pathfinding. + + Created by Grid.get_dijkstra_map(). Provides distance queries + and path finding from the root position. + """ + + root: Tuple[int, int] # Read-only root position + + def get_distance(self, pos: Tuple[int, int]) -> float: + """Get distance from root to position (-1 if unreachable).""" + ... + + def get_path(self, pos: Tuple[int, int]) -> Optional[List[Tuple[int, int]]]: + """Get path from position to root.""" + ... 
+ +class HeightMap: + """2D height field for terrain generation. + + Used for procedural generation and applying terrain to grids. + """ + + width: int # Read-only + height: int # Read-only + + def __init__(self, width: int, height: int) -> None: ... + + def get(self, x: int, y: int) -> float: + """Get height value at position.""" + ... + + def set(self, x: int, y: int, value: float) -> None: + """Set height value at position.""" + ... + + def fill(self, value: float) -> 'HeightMap': + """Fill entire heightmap with a value.""" + ... + + def clear(self) -> 'HeightMap': + """Clear heightmap to 0.""" + ... + + def normalize(self, min_val: float = 0.0, max_val: float = 1.0) -> 'HeightMap': + """Normalize values to range.""" + ... + + def add_hill(self, center: Tuple[float, float], radius: float, height: float) -> 'HeightMap': + """Add a hill at position.""" + ... + + def add_fbm(self, noise: 'NoiseSource', mulx: float = 1.0, muly: float = 1.0, + addx: float = 0.0, addy: float = 0.0, octaves: int = 4, + delta: float = 1.0, scale: float = 1.0) -> 'HeightMap': + """Add fractal Brownian motion noise.""" + ... + + def scale(self, factor: float) -> 'HeightMap': + """Scale all values by factor.""" + ... + + def clamp(self, min_val: float, max_val: float) -> 'HeightMap': + """Clamp values to range.""" + ... + +class NoiseSource: + """Coherent noise generator for procedural generation. + + Supports various noise types: PERLIN, SIMPLEX, WAVELET, etc. + """ + + def __init__(self, type: str = 'SIMPLEX', seed: Optional[int] = None) -> None: ... + + def get(self, x: float, y: float, z: float = 0.0) -> float: + """Get noise value at position.""" + ... + +class BSP: + """Binary space partitioning for dungeon generation. + + Recursively subdivides a rectangle into rooms. + """ + + x: int + y: int + width: int + height: int + level: int + horizontal: bool + position: int + + def __init__(self, x: int, y: int, width: int, height: int) -> None: ... 
+ + def split_recursive(self, randomizer: Optional[Any] = None, nb: int = 8, + minHSize: int = 4, minVSize: int = 4, + maxHRatio: float = 1.5, maxVRatio: float = 1.5) -> None: + """Recursively split the BSP tree.""" + ... + + def traverse(self, callback: Callable[['BSP'], bool], + order: str = 'PRE_ORDER') -> None: + """Traverse BSP tree calling callback for each node.""" + ... + + def is_leaf(self) -> bool: + """Check if this is a leaf node (no children).""" + ... + + def contains(self, x: int, y: int) -> bool: + """Check if point is within this node's bounds.""" + ... + + def get_left(self) -> Optional['BSP']: + """Get left child node.""" + ... + + def get_right(self) -> Optional['BSP']: + """Get right child node.""" + ... + +class Entity(Drawable): + """Entity(grid_x=0, grid_y=0, texture=None, sprite_index=0, name='') + + Game entity that lives within a Grid. + """ + + @overload + def __init__(self) -> None: ... + @overload + def __init__(self, grid_x: float = 0, grid_y: float = 0, texture: Optional[Texture] = None, + sprite_index: int = 0, name: str = '') -> None: ... + + grid_x: float + grid_y: float + texture: Texture + sprite_index: int + sprite_number: int # Deprecated alias for sprite_index + grid: Optional[Grid] + + def at(self, grid_x: float, grid_y: float) -> None: + """Move entity to grid position.""" + ... + + def die(self) -> None: + """Remove entity from its grid.""" + ... + + def index(self) -> int: + """Get index in parent grid's entity collection.""" + ... + +class UICollection: + """Collection of UI drawable elements (Frame, Caption, Sprite, Grid, Line, Circle, Arc).""" + + def __len__(self) -> int: ... + def __getitem__(self, index: int) -> UIElement: ... + def __setitem__(self, index: int, value: UIElement) -> None: ... + def __delitem__(self, index: int) -> None: ... + def __contains__(self, item: UIElement) -> bool: ... + def __iter__(self) -> Any: ... + def __add__(self, other: 'UICollection') -> 'UICollection': ... 
+ def __iadd__(self, other: 'UICollection') -> 'UICollection': ... + + def append(self, item: UIElement) -> None: ... + def extend(self, items: List[UIElement]) -> None: ... + def pop(self, index: int = -1) -> UIElement: ... + def remove(self, item: UIElement) -> None: ... + def index(self, item: UIElement) -> int: ... + def count(self, item: UIElement) -> int: ... + +class EntityCollection: + """Collection of Entity objects.""" + + def __len__(self) -> int: ... + def __getitem__(self, index: int) -> Entity: ... + def __setitem__(self, index: int, value: Entity) -> None: ... + def __delitem__(self, index: int) -> None: ... + def __contains__(self, item: Entity) -> bool: ... + def __iter__(self) -> Any: ... + def __add__(self, other: 'EntityCollection') -> 'EntityCollection': ... + def __iadd__(self, other: 'EntityCollection') -> 'EntityCollection': ... + + def append(self, item: Entity) -> None: ... + def extend(self, items: List[Entity]) -> None: ... + def pop(self, index: int = -1) -> Entity: ... + def remove(self, item: Entity) -> None: ... + def index(self, item: Entity) -> int: ... + def count(self, item: Entity) -> int: ... + +class Scene: + """Base class for object-oriented scenes.""" + + name: str + children: UICollection # UI elements collection (read-only alias for get_ui()) + # Keyboard handler receives (key: Key, action: InputState) per #184 + on_key: Optional[Callable[['Key', 'InputState'], None]] + + def __init__(self, name: str) -> None: ... + + def activate(self) -> None: + """Called when scene becomes active.""" + ... + + def deactivate(self) -> None: + """Called when scene becomes inactive.""" + ... + + def get_ui(self) -> UICollection: + """Get UI elements collection.""" + ... + + def on_keypress(self, key: str, pressed: bool) -> None: + """Handle keyboard events (override in subclass).""" + ... + + def on_click(self, x: float, y: float, button: int) -> None: + """Handle mouse clicks (override in subclass).""" + ... 
+ + def on_enter(self) -> None: + """Called when entering the scene (override in subclass).""" + ... + + def on_exit(self) -> None: + """Called when leaving the scene (override in subclass).""" + ... + + def on_resize(self, width: int, height: int) -> None: + """Handle window resize events (override in subclass).""" + ... + + def update(self, dt: float) -> None: + """Update scene logic (override in subclass).""" + ... + +class Timer: + """Timer object for scheduled callbacks. + + Callback receives (timer_object, runtime_ms). + """ + + name: str + interval: int + callback: Callable[['Timer', float], None] + active: bool + paused: bool + stopped: bool + remaining: int + once: bool + + def __init__(self, name: str, callback: Callable[['Timer', float], None], + interval: int, once: bool = False, start: bool = True) -> None: ... + + def stop(self) -> None: + """Stop the timer.""" + ... + + def pause(self) -> None: + """Pause the timer.""" + ... + + def resume(self) -> None: + """Resume the timer.""" + ... + + def restart(self) -> None: + """Restart the timer.""" + ... + + def cancel(self) -> None: + """Cancel and remove the timer.""" + ... class Window: - """Window singleton for accessing and modifying the game window properties""" - def __init__(selftype(self)) -> None: ... + """Window singleton for managing the game window.""" - def center(self) -> None: ... - def get(self) -> Window: ... - def screenshot(selffilename: str = None) -> bytes | None: ... + resolution: Tuple[int, int] + fullscreen: bool + vsync: bool + title: str + fps_limit: int + game_resolution: Tuple[int, int] + scaling_mode: str -# Functions + @staticmethod + def get() -> 'Window': + """Get the window singleton instance.""" + ... -def createScene(name: str) -> None: ... -def createSoundBuffer(filename: str) -> int: ... -def currentScene() -> str: ... -def delTimer(name: str) -> None: ... -def exit() -> None: ... -def find(name: str, scene: str = None) -> UIDrawable | None: ... 
-def findAll(pattern: str, scene: str = None) -> list: ... -def getMetrics() -> dict: ... -def getMusicVolume() -> int: ... -def getSoundVolume() -> int: ... -def keypressScene(handler: callable) -> None: ... -def loadMusic(filename: str) -> None: ... -def playSound(buffer_id: int) -> None: ... -def sceneUI(scene: str = None) -> list: ... -def setMusicVolume(volume: int) -> None: ... -def setScale(multiplier: float) -> None: ... -def setScene(scene: str, transition: str = None, duration: float = 0.0) -> None: ... -def setSoundVolume(volume: int) -> None: ... -def setTimer(name: str, handler: callable, interval: int) -> None: ... +class Animation: + """Animation object for animating UI properties. -# Constants + Note: The preferred way to create animations is via the .animate() method + on drawable objects: + frame.animate("x", 500.0, 2.0, mcrfpy.Easing.EASE_IN_OUT) -FOV_BASIC: int -FOV_DIAMOND: int -FOV_PERMISSIVE_0: int -FOV_PERMISSIVE_1: int -FOV_PERMISSIVE_2: int -FOV_PERMISSIVE_3: int -FOV_PERMISSIVE_4: int -FOV_PERMISSIVE_5: int -FOV_PERMISSIVE_6: int -FOV_PERMISSIVE_7: int -FOV_PERMISSIVE_8: int -FOV_RESTRICTIVE: int -FOV_SHADOW: int -default_font: Any -default_texture: Any \ No newline at end of file + Animation callbacks (#229) receive (target, property, final_value): + def on_complete(target, prop, value): + print(f"{type(target).__name__}.{prop} = {value}") + """ + + property: str + duration: float + easing: 'Easing' + callback: Optional[Callable[[Any, str, Any], None]] + + def __init__(self, property: str, end_value: Any, duration: float, + easing: Union[str, 'Easing'] = 'linear', + callback: Optional[Callable[[Any, str, Any], None]] = None) -> None: ... + + def start(self, target: Any) -> None: + """Start the animation on a target object.""" + ... + + def get_current_value(self) -> Any: + """Get the current interpolated value.""" + ... 
+ +# Module-level properties + +current_scene: Scene # Get or set the active scene + +# Module functions + +def createSoundBuffer(filename: str) -> int: + """Load a sound effect from a file and return its buffer ID.""" + ... + +def loadMusic(filename: str) -> None: + """Load and immediately play background music from a file.""" + ... + +def setMusicVolume(volume: int) -> None: + """Set the global music volume (0-100).""" + ... + +def setSoundVolume(volume: int) -> None: + """Set the global sound effects volume (0-100).""" + ... + +def playSound(buffer_id: int) -> None: + """Play a sound effect using a previously loaded buffer.""" + ... + +def getMusicVolume() -> int: + """Get the current music volume level (0-100).""" + ... + +def getSoundVolume() -> int: + """Get the current sound effects volume level (0-100).""" + ... + +def sceneUI(scene: Optional[str] = None) -> UICollection: + """Get all UI elements for a scene.""" + ... + +def currentScene() -> str: + """Get the name of the currently active scene.""" + ... + +def setScene(scene: str, transition: Optional[str] = None, duration: float = 0.0) -> None: + """Switch to a different scene with optional transition effect.""" + ... + +def createScene(name: str) -> None: + """Create a new empty scene.""" + ... + +def keypressScene(handler: Callable[[str, bool], None]) -> None: + """Set the keyboard event handler for the current scene. + + DEPRECATED: Use scene.on_key = handler instead. + The new handler receives (key: Key, action: InputState) enums. + """ + ... + +def setTimer(name: str, handler: Callable[[float], None], interval: int) -> None: + """Create or update a recurring timer. + + DEPRECATED: Use Timer(name, callback, interval) object instead. + """ + ... + +def delTimer(name: str) -> None: + """Stop and remove a timer. + + DEPRECATED: Use timer.cancel() method instead. + """ + ... + +def exit() -> None: + """Cleanly shut down the game engine and exit the application.""" + ... 
+ +def setScale(multiplier: float) -> None: + """Scale the game window size (deprecated - use Window.resolution).""" + ... + +def find(name: str, scene: Optional[str] = None) -> Optional[UIElement]: + """Find the first UI element with the specified name.""" + ... + +def findAll(pattern: str, scene: Optional[str] = None) -> List[UIElement]: + """Find all UI elements matching a name pattern (supports * wildcards).""" + ... + +def getMetrics() -> Dict[str, Union[int, float]]: + """Get current performance metrics.""" + ... + +def step(dt: float) -> None: + """Advance the game loop by dt seconds (headless mode only).""" + ... + +def start_benchmark() -> None: + """Start performance benchmarking.""" + ... + +def end_benchmark() -> None: + """End performance benchmarking.""" + ... + +def log_benchmark(message: str) -> None: + """Log a benchmark measurement.""" + ... + +# Submodule +class automation: + """Automation API for testing and scripting.""" + + @staticmethod + def screenshot(filename: str) -> bool: + """Save a screenshot to the specified file.""" + ... + + @staticmethod + def position() -> Tuple[int, int]: + """Get current mouse position as (x, y) tuple.""" + ... + + @staticmethod + def size() -> Tuple[int, int]: + """Get screen size as (width, height) tuple.""" + ... + + @staticmethod + def onScreen(x: int, y: int) -> bool: + """Check if coordinates are within screen bounds.""" + ... + + @staticmethod + def moveTo(x: int, y: int, duration: float = 0.0) -> None: + """Move mouse to absolute position.""" + ... + + @staticmethod + def moveRel(xOffset: int, yOffset: int, duration: float = 0.0) -> None: + """Move mouse relative to current position.""" + ... + + @staticmethod + def dragTo(x: int, y: int, duration: float = 0.0, button: str = 'left') -> None: + """Drag mouse to position.""" + ... + + @staticmethod + def dragRel(xOffset: int, yOffset: int, duration: float = 0.0, button: str = 'left') -> None: + """Drag mouse relative to current position.""" + ... 
+ + @staticmethod + def click(x: Optional[int] = None, y: Optional[int] = None, clicks: int = 1, + interval: float = 0.0, button: str = 'left') -> None: + """Click mouse at position.""" + ... + + @staticmethod + def mouseDown(x: Optional[int] = None, y: Optional[int] = None, button: str = 'left') -> None: + """Press mouse button down.""" + ... + + @staticmethod + def mouseUp(x: Optional[int] = None, y: Optional[int] = None, button: str = 'left') -> None: + """Release mouse button.""" + ... + + @staticmethod + def keyDown(key: str) -> None: + """Press key down.""" + ... + + @staticmethod + def keyUp(key: str) -> None: + """Release key.""" + ... + + @staticmethod + def press(key: str) -> None: + """Press and release a key.""" + ... + + @staticmethod + def typewrite(text: str, interval: float = 0.0) -> None: + """Type text with optional interval between characters.""" + ... diff --git a/tests/demo/audio_synth_demo.py b/tests/demo/audio_synth_demo.py new file mode 100644 index 0000000..15428ab --- /dev/null +++ b/tests/demo/audio_synth_demo.py @@ -0,0 +1,858 @@ +"""McRogueFace Audio Synth Demo - SFXR Clone + Animalese Speech + +Two-scene interactive demo showcasing the SoundBuffer procedural audio system: +- Scene 1 (SFXR): Full sfxr parameter editor with presets, waveform selection, + 24 synthesis parameters, DSP effect chain, and real-time playback +- Scene 2 (Animalese): Animal Crossing-style speech synthesis with formant + generation, character personality presets, and text-to-speech playback + +Controls: + SFXR Scene: SPACE=play, R=randomize, M=mutate, 1-4=waveform, TAB=switch + Animalese Scene: Type text, ENTER=speak, 1-5=personality, TAB=switch + Both: ESC=quit +""" +import mcrfpy +import sys +import random + +# ============================================================ +# Constants +# ============================================================ +W, H = 1024, 768 + +# Retro sfxr color palette +C_BG = mcrfpy.Color(198, 186, 168) +C_PANEL = mcrfpy.Color(178, 
166, 148) +C_BTN = mcrfpy.Color(158, 148, 135) +C_BTN_ON = mcrfpy.Color(115, 168, 115) +C_BTN_ACC = mcrfpy.Color(168, 115, 115) +C_TEXT = mcrfpy.Color(35, 30, 25) +C_LABEL = mcrfpy.Color(55, 48, 40) +C_HEADER = mcrfpy.Color(25, 20, 15) +C_SL_BG = mcrfpy.Color(80, 72, 62) +C_SL_FILL = mcrfpy.Color(192, 152, 58) +C_VALUE = mcrfpy.Color(68, 60, 50) +C_OUTLINE = mcrfpy.Color(95, 85, 72) +C_ACCENT = mcrfpy.Color(200, 75, 55) +C_BG2 = mcrfpy.Color(45, 50, 65) +C_BG2_PNL = mcrfpy.Color(55, 62, 78) + +# ============================================================ +# Shared State +# ============================================================ +class S: + """Global mutable state.""" + wave_type = 0 + params = { + 'env_attack': 0.0, 'env_sustain': 0.3, 'env_punch': 0.0, + 'env_decay': 0.4, + 'base_freq': 0.3, 'freq_limit': 0.0, 'freq_ramp': 0.0, + 'freq_dramp': 0.0, + 'vib_strength': 0.0, 'vib_speed': 0.0, + 'arp_mod': 0.0, 'arp_speed': 0.0, + 'duty': 0.0, 'duty_ramp': 0.0, + 'repeat_speed': 0.0, + 'pha_offset': 0.0, 'pha_ramp': 0.0, + 'lpf_freq': 1.0, 'lpf_ramp': 0.0, 'lpf_resonance': 0.0, + 'hpf_freq': 0.0, 'hpf_ramp': 0.0, + } + volume = 80.0 + auto_play = True + + # Post-processing DSP + fx_on = { + 'low_pass': False, 'high_pass': False, 'echo': False, + 'reverb': False, 'distortion': False, 'bit_crush': False, + } + + # Animalese + text = "HELLO WORLD" + base_pitch = 180.0 + speech_rate = 12.0 + pitch_jitter = 2.0 + breathiness = 0.2 + + # UI refs (populated during setup) + sliders = {} + wave_btns = [] + fx_btns = {} + text_cap = None + letter_cap = None + speak_idx = 0 + speaking = False + + # Prevent GC of sound/timer objects + sound = None + anim_sound = None + speak_timer = None + + # Scene refs + sfxr_scene = None + anim_scene = None + + # Animalese sliders + anim_sliders = {} + + +# ============================================================ +# UI Helpers +# ============================================================ +# Keep all widget objects alive +_widgets = 
[] + +def _cap(parent, x, y, text, size=11, color=None): + """Add a Caption to parent.children.""" + c = mcrfpy.Caption(text=text, pos=(x, y), + fill_color=color or C_LABEL) + c.font_size = size + parent.children.append(c) + return c + +def _btn(parent, x, y, w, h, label, cb, color=None, fsize=11): + """Clickable button frame with centered text.""" + f = mcrfpy.Frame(pos=(x, y), size=(w, h), + fill_color=color or C_BTN, + outline_color=C_OUTLINE, outline=1.0) + parent.children.append(f) + tx = max(2, (w - len(label) * fsize * 0.58) / 2) + ty = max(1, (h - fsize) / 2) + c = mcrfpy.Caption(text=label, pos=(int(tx), int(ty)), + fill_color=C_TEXT) + c.font_size = fsize + f.children.append(c) + def click(pos, button, action): + if button == mcrfpy.MouseButton.LEFT and action == mcrfpy.InputState.PRESSED: + cb() + f.on_click = click + c.on_click = click + return f, c + + +class Slider: + """Horizontal slider widget with label and value display.""" + def __init__(self, parent, x, y, label, lo, hi, val, cb, + sw=140, sh=10, lw=108): + _widgets.append(self) + self.lo, self.hi, self.val, self.cb = lo, hi, val, cb + self.sw = sw + self.tx = x + lw # track absolute x + + # label + _cap(parent, x, y, label) + + # track + self.track = mcrfpy.Frame( + pos=(self.tx, y), size=(sw, sh), + fill_color=C_SL_BG, outline_color=C_OUTLINE, outline=1.0) + parent.children.append(self.track) + + # fill + pct = self._pct(val) + self.fill = mcrfpy.Frame( + pos=(0, 0), size=(max(1, int(sw * pct)), sh), + fill_color=C_SL_FILL) + self.track.children.append(self.fill) + + # value text + self.vcap = mcrfpy.Caption( + text=self._fmt(val), + pos=(self.tx + sw + 4, y), fill_color=C_VALUE) + self.vcap.font_size = 10 + parent.children.append(self.vcap) + + self.track.on_click = self._click + self.fill.on_click = self._click + + def _pct(self, v): + r = self.hi - self.lo + return (v - self.lo) / r if r else 0.0 + + def _fmt(self, v): + if abs(v) < 0.001 and v != 0: + return f"{v:.4f}" + return f"{v:.3f}" 
+ + def _click(self, pos, button, action): + if button != mcrfpy.MouseButton.LEFT: + return + if action != mcrfpy.InputState.PRESSED: + return + p = max(0.0, min(1.0, (pos.x - self.tx) / self.sw)) + self.val = self.lo + p * (self.hi - self.lo) + self.fill.w = max(1, int(self.sw * p)) + self.vcap.text = self._fmt(self.val) + self.cb(self.val) + + def set(self, v): + self.val = max(self.lo, min(self.hi, v)) + p = self._pct(self.val) + self.fill.w = max(1, int(self.sw * p)) + self.vcap.text = self._fmt(self.val) + + +# ============================================================ +# SFXR Audio Logic +# ============================================================ +def play_sfxr(): + """Generate sfxr buffer from current params and play it.""" + p = dict(S.params) + p['wave_type'] = S.wave_type + try: + buf = mcrfpy.SoundBuffer.sfxr(**p) + except Exception as e: + print(f"sfxr generation error: {e}") + return + + # Post-processing DSP chain + if S.fx_on['low_pass']: + buf = buf.low_pass(2000.0) + if S.fx_on['high_pass']: + buf = buf.high_pass(500.0) + if S.fx_on['echo']: + buf = buf.echo(200.0, 0.4, 0.5) + if S.fx_on['reverb']: + buf = buf.reverb(0.8, 0.5, 0.3) + if S.fx_on['distortion']: + buf = buf.distortion(2.0) + if S.fx_on['bit_crush']: + buf = buf.bit_crush(8, 4) + + buf = buf.normalize() + if buf.sample_count == 0: + # Some param combos produce silence (e.g. 
freq_limit > base_freq) + return + S.sound = mcrfpy.Sound(buf) + S.sound.volume = S.volume + S.sound.play() + + +def load_preset(name): + """Load sfxr preset, sync UI, optionally auto-play.""" + try: + buf = mcrfpy.SoundBuffer.sfxr(name) + except Exception as e: + print(f"Preset error: {e}") + return + mp = buf.sfxr_params + if not mp: + return + S.wave_type = int(mp.get('wave_type', 0)) + for k in S.params: + if k in mp: + S.params[k] = mp[k] + _sync_sfxr_ui() + if S.auto_play: + play_sfxr() + + +def mutate_sfxr(): + """Mutate current params slightly.""" + p = dict(S.params) + p['wave_type'] = S.wave_type + try: + buf = mcrfpy.SoundBuffer.sfxr(**p) + m = buf.sfxr_mutate(0.05) + except Exception as e: + print(f"Mutate error: {e}") + return + mp = m.sfxr_params + if mp: + S.wave_type = int(mp.get('wave_type', S.wave_type)) + for k in S.params: + if k in mp: + S.params[k] = mp[k] + _sync_sfxr_ui() + if S.auto_play: + play_sfxr() + + +def randomize_sfxr(): + """Load a random preset with random seed.""" + presets = ["coin", "laser", "explosion", "powerup", "hurt", "jump", "blip"] + buf = mcrfpy.SoundBuffer.sfxr(random.choice(presets), + seed=random.randint(0, 999999)) + mp = buf.sfxr_params + if mp: + S.wave_type = int(mp.get('wave_type', 0)) + for k in S.params: + if k in mp: + S.params[k] = mp[k] + _sync_sfxr_ui() + if S.auto_play: + play_sfxr() + + +def _sync_sfxr_ui(): + """Push state to all sfxr UI widgets.""" + for k, sl in S.sliders.items(): + if k in S.params: + sl.set(S.params[k]) + _update_wave_btns() + + +def _update_wave_btns(): + for i, (btn, _cap) in enumerate(S.wave_btns): + btn.fill_color = C_BTN_ON if i == S.wave_type else C_BTN + + +def set_wave(i): + S.wave_type = i + _update_wave_btns() + if S.auto_play: + play_sfxr() + + +def toggle_fx(key): + S.fx_on[key] = not S.fx_on[key] + if key in S.fx_btns: + S.fx_btns[key].fill_color = C_BTN_ON if S.fx_on[key] else C_BTN + + +# ============================================================ +# Animalese Audio 
Logic +# ============================================================ +# Vowel formant frequencies (F1, F2) +FORMANTS = { + 'ah': (660, 1700), + 'eh': (530, 1850), + 'ee': (270, 2300), + 'oh': (570, 870), + 'oo': (300, 870), +} + +LETTER_VOWEL = {} +for _c in 'AHLR': + LETTER_VOWEL[_c] = 'ah' +for _c in 'EDTSNZ': + LETTER_VOWEL[_c] = 'eh' +for _c in 'ICJY': + LETTER_VOWEL[_c] = 'ee' +for _c in 'OGKQX': + LETTER_VOWEL[_c] = 'oh' +for _c in 'UBFMPVW': + LETTER_VOWEL[_c] = 'oo' + +CONSONANTS = set('BCDFGJKPQSTVXZ') + +# Cache generated vowel base sounds per pitch +_vowel_cache = {} + +def _make_vowel(vowel_key, pitch, breathiness): + """Generate a single vowel sound (~120ms) at given pitch.""" + f1, f2 = FORMANTS[vowel_key] + dur = 0.12 + + # Glottal source: sawtooth at fundamental + source = mcrfpy.SoundBuffer.tone(pitch, dur, "saw", + attack=0.005, decay=0.015, sustain=0.7, release=0.015) + + # Formant approximation: low-pass at F1 frequency + # (single-pole filter, so we use a higher cutoff for approximation) + filtered = source.low_pass(float(f1) * 1.5) + + # Add breathiness as noise + if breathiness > 0.05: + noise = mcrfpy.SoundBuffer.tone(1000, dur, "noise", + attack=0.003, decay=0.01, sustain=breathiness * 0.25, + release=0.01) + filtered = mcrfpy.SoundBuffer.mix([filtered, noise]) + + return filtered.normalize() + + +def _make_letter_sound(char, pitch, breathiness): + """Generate audio for a single letter.""" + ch = char.upper() + if ch not in LETTER_VOWEL: + return None + + vowel = _make_vowel(LETTER_VOWEL[ch], pitch, breathiness) + + # Add consonant noise burst + if ch in CONSONANTS: + burst = mcrfpy.SoundBuffer.tone(2500, 0.012, "noise", + attack=0.001, decay=0.003, sustain=0.6, release=0.003) + vowel = mcrfpy.SoundBuffer.concat([burst, vowel], overlap=0.004) + + return vowel + + +def speak_text(): + """Generate and play animalese speech from current text.""" + text = S.text.upper() + if not text.strip(): + return + + rate = S.speech_rate + letter_dur = 
def speak_text():
    """Generate and play animalese speech from the current text."""
    text = S.text.upper()
    if not text.strip():
        return

    rate = S.speech_rate
    letter_dur = 1.0 / rate
    overlap = letter_dur * 0.25

    pieces = []
    for ch in text:
        if ch == ' ':
            # Short silence (40 ms of int16 zeros) between words.
            pieces.append(mcrfpy.SoundBuffer.from_samples(
                b'\x00\x00' * int(44100 * 0.04), 1, 44100))
        elif ch in '.!?':
            # Longer pause (120 ms) after sentence punctuation.
            pieces.append(mcrfpy.SoundBuffer.from_samples(
                b'\x00\x00' * int(44100 * 0.12), 1, 44100))
        elif ch.isalpha():
            # Per-letter pitch jitter, expressed in semitones.
            jitter = random.uniform(-S.pitch_jitter, S.pitch_jitter)
            pitch = S.base_pitch * (2.0 ** (jitter / 12.0))
            lsnd = _make_letter_sound(ch, pitch, S.breathiness)
            if lsnd:
                # Trim each letter to at most one letter slot.
                if lsnd.duration > letter_dur:
                    lsnd = lsnd.slice(0, letter_dur)
                pieces.append(lsnd)

    if not pieces:
        return

    result = mcrfpy.SoundBuffer.concat(pieces, overlap=overlap)
    result = result.normalize()
    # Light room reverb for warmth.
    result = result.reverb(0.3, 0.5, 0.15)

    S.anim_sound = mcrfpy.Sound(result)
    S.anim_sound.volume = S.volume
    S.anim_sound.play()

    # Kick off the per-letter display animation.
    S.speak_idx = 0
    S.speaking = True
    if S.letter_cap:
        S.letter_cap.text = ""
    interval = int(1000.0 / S.speech_rate)
    S.speak_timer = mcrfpy.Timer("speak_tick", _tick_letter, interval)


def _tick_letter(timer, runtime):
    """Advance the speaking-letter display; stops itself at end of text."""
    text = S.text.upper()
    if S.speak_idx < len(text):
        ch = text[S.speak_idx]
        if S.letter_cap:
            S.letter_cap.text = ch if ch.strip() else "_"
        S.speak_idx += 1
    else:
        if S.letter_cap:
            S.letter_cap.text = ""
        S.speaking = False
        timer.stop()


# Personality presets: base pitch (Hz), letters/sec, jitter (semitones),
# and breathiness amount.
PERSONALITIES = {
    'CRANKY': {'pitch': 90, 'rate': 10, 'jitter': 1.5, 'breath': 0.4},
    'NORMAL': {'pitch': 180, 'rate': 12, 'jitter': 2.0, 'breath': 0.2},
    'PEPPY': {'pitch': 280, 'rate': 18, 'jitter': 3.5, 'breath': 0.1},
    'LAZY': {'pitch': 120, 'rate': 8, 'jitter': 1.0, 'breath': 0.5},
    'JOCK': {'pitch': 100, 'rate': 15, 'jitter': 2.5, 'breath': 0.3},
}
def load_personality(name):
    """Apply a named character preset to the voice state and sync the UI."""
    preset = PERSONALITIES[name]
    S.base_pitch = preset['pitch']
    S.speech_rate = preset['rate']
    S.pitch_jitter = preset['jitter']
    S.breathiness = preset['breath']
    _sync_anim_ui()


def _sync_anim_ui():
    """Push the current voice parameters into the animalese sliders."""
    current = {
        'pitch': S.base_pitch,
        'rate': S.speech_rate,
        'jitter': S.pitch_jitter,
        'breath': S.breathiness,
    }
    for key, slider in S.anim_sliders.items():
        if key in current:
            slider.set(current[key])
def build_sfxr():
    """Construct the sfxr synthesizer scene and wire up all widgets."""
    scene = mcrfpy.Scene("sfxr")
    bg = mcrfpy.Frame(pos=(0, 0), size=(W, H), fill_color=C_BG)
    scene.children.append(bg)

    # --- Left Panel: Presets ---
    _cap(bg, 12, 8, "GENERATOR", size=13, color=C_HEADER)

    presets = [
        ("PICKUP/COIN", "coin"),
        ("LASER/SHOOT", "laser"),
        ("EXPLOSION", "explosion"),
        ("POWERUP", "powerup"),
        ("HIT/HURT", "hurt"),
        ("JUMP", "jump"),
        ("BLIP/SELECT", "blip"),
    ]
    py = 30
    for label, preset in presets:
        # Default-arg trick binds the current preset into the lambda.
        _btn(bg, 10, py, 118, 22, label,
             lambda p=preset: load_preset(p))
        py += 26

    py += 10
    _btn(bg, 10, py, 118, 22, "MUTATE", mutate_sfxr, color=C_BTN_ACC)
    py += 26
    _btn(bg, 10, py, 118, 22, "RANDOMIZE", randomize_sfxr, color=C_BTN_ACC)

    # --- Top: Waveform Selection ---
    _cap(bg, 145, 8, "MANUAL SETTINGS", size=13, color=C_HEADER)

    wave_names = ["SQUARE", "SAWTOOTH", "SINEWAVE", "NOISE"]
    S.wave_btns = []
    for i, wn in enumerate(wave_names):
        bx = 145 + i * 105
        b, c = _btn(bg, bx, 28, 100, 22, wn,
                    lambda idx=i: set_wave(idx))
        S.wave_btns.append((b, c))
    _update_wave_btns()

    # --- Center: SFXR Parameter Sliders ---
    # Column 1: envelope, frequency and vibrato controls.
    col1_x = 140
    col1_params = [
        ("ATTACK TIME", 'env_attack', 0.0, 1.0),
        ("SUSTAIN TIME", 'env_sustain', 0.0, 1.0),
        ("SUSTAIN PUNCH", 'env_punch', 0.0, 1.0),
        ("DECAY TIME", 'env_decay', 0.0, 1.0),
        ("", None, 0, 0),  # spacer
        ("START FREQ", 'base_freq', 0.0, 1.0),
        ("MIN FREQ", 'freq_limit', 0.0, 1.0),
        ("SLIDE", 'freq_ramp', -1.0, 1.0),
        ("DELTA SLIDE", 'freq_dramp', -1.0, 1.0),
        ("", None, 0, 0),
        ("VIB DEPTH", 'vib_strength', 0.0, 1.0),
        ("VIB SPEED", 'vib_speed', 0.0, 1.0),
    ]

    cy = 58
    ROW = 22
    for label, key, lo, hi in col1_params:
        if key is None:  # spacer row
            cy += 8
            continue
        val = S.params[key]
        sl = Slider(bg, col1_x, cy, label, lo, hi, val,
                    lambda v, k=key: _sfxr_param_changed(k, v),
                    sw=140, lw=108)
        S.sliders[key] = sl
        cy += ROW

    # Column 2: duty, repeat, phaser and filter controls.
    col2_x = 530
    col2_params = [
        ("SQUARE DUTY", 'duty', 0.0, 1.0),
        ("DUTY SWEEP", 'duty_ramp', -1.0, 1.0),
        ("", None, 0, 0),
        ("REPEAT SPEED", 'repeat_speed', 0.0, 1.0),
        ("", None, 0, 0),
        ("PHA OFFSET", 'pha_offset', -1.0, 1.0),
        ("PHA SWEEP", 'pha_ramp', -1.0, 1.0),
        ("", None, 0, 0),
        ("LP CUTOFF", 'lpf_freq', 0.0, 1.0),
        ("LP SWEEP", 'lpf_ramp', -1.0, 1.0),
        ("LP RESONANCE", 'lpf_resonance', 0.0, 1.0),
        ("HP CUTOFF", 'hpf_freq', 0.0, 1.0),
        ("HP SWEEP", 'hpf_ramp', -1.0, 1.0),
    ]

    cy = 58
    for label, key, lo, hi in col2_params:
        if key is None:
            cy += 8
            continue
        val = S.params[key]
        sl = Slider(bg, col2_x, cy, label, lo, hi, val,
                    lambda v, k=key: _sfxr_param_changed(k, v),
                    sw=140, lw=108)
        S.sliders[key] = sl
        cy += ROW

    # Column 2 extras: arpeggiation
    col2_params2 = [
        ("ARP MOD", 'arp_mod', -1.0, 1.0),
        ("ARP SPEED", 'arp_speed', 0.0, 1.0),
    ]
    cy += 8
    for label, key, lo, hi in col2_params2:
        val = S.params[key]
        sl = Slider(bg, col2_x, cy, label, lo, hi, val,
                    lambda v, k=key: _sfxr_param_changed(k, v),
                    sw=140, lw=108)
        S.sliders[key] = sl
        cy += ROW

    # --- Right Panel ---
    rx = 790

    # Volume
    _cap(bg, rx, 8, "VOLUME", size=12, color=C_HEADER)
    Slider(bg, rx, 26, "", 0, 100, S.volume,
           lambda v: setattr(S, 'volume', v),
           sw=180, lw=0)

    # Play button
    _btn(bg, rx, 50, 180, 28, "PLAY SOUND", play_sfxr,
         color=mcrfpy.Color(180, 100, 80), fsize=13)

    # Auto-play toggle
    auto_btn, auto_cap = _btn(bg, rx, 86, 180, 22, "AUTO-PLAY: ON",
                              lambda: None, color=C_BTN_ON)

    def toggle_auto():
        S.auto_play = not S.auto_play
        auto_btn.fill_color = C_BTN_ON if S.auto_play else C_BTN
        auto_cap.text = "AUTO-PLAY: ON" if S.auto_play else "AUTO-PLAY: OFF"

    auto_btn.on_click = lambda p, b, a: (
        toggle_auto() if b == mcrfpy.MouseButton.LEFT
        and a == mcrfpy.InputState.PRESSED else None)
    auto_cap.on_click = auto_btn.on_click

    # DSP Effects
    _cap(bg, rx, 120, "DSP EFFECTS", size=12, color=C_HEADER)

    fx_list = [
        ("LOW PASS", 'low_pass'),
        ("HIGH PASS", 'high_pass'),
        ("ECHO", 'echo'),
        ("REVERB", 'reverb'),
        ("DISTORTION", 'distortion'),
        ("BIT CRUSH", 'bit_crush'),
    ]
    fy = 140
    for label, key in fx_list:
        fb, fc = _btn(bg, rx, fy, 180, 20, label,
                      lambda k=key: toggle_fx(k))
        S.fx_btns[key] = fb
        fy += 24

    # Navigation
    _cap(bg, rx, fy + 16, "NAVIGATION", size=12, color=C_HEADER)
    _btn(bg, rx, fy + 36, 180, 26, "ANIMALESE >>",
         lambda: setattr(mcrfpy, 'current_scene', S.anim_scene))

    # --- Keyboard hints ---
    hints_y = H - 90
    _cap(bg, 10, hints_y, "Keyboard:", size=11, color=C_HEADER)
    _cap(bg, 10, hints_y + 16, "SPACE = Play R = Randomize M = Mutate",
         size=10, color=C_VALUE)
    _cap(bg, 10, hints_y + 30, "1-4 = Waveform TAB = Animalese ESC = Quit",
         size=10, color=C_VALUE)

    # --- Key handler ---
    def on_key(key, action):
        if action != mcrfpy.InputState.PRESSED:
            return
        if key == mcrfpy.Key.ESCAPE:
            sys.exit(0)
        elif key == mcrfpy.Key.TAB:
            mcrfpy.current_scene = S.anim_scene
        elif key == mcrfpy.Key.SPACE:
            play_sfxr()
        elif key == mcrfpy.Key.R:
            randomize_sfxr()
        elif key == mcrfpy.Key.M:
            mutate_sfxr()
        elif key == mcrfpy.Key.NUM_1:
            set_wave(0)
        elif key == mcrfpy.Key.NUM_2:
            set_wave(1)
        elif key == mcrfpy.Key.NUM_3:
            set_wave(2)
        elif key == mcrfpy.Key.NUM_4:
            set_wave(3)

    scene.on_key = on_key
    return scene
def _sfxr_param_changed(key, val):
    """Slider callback: store the new sfxr param, optionally re-audition."""
    S.params[key] = val
    if S.auto_play:
        play_sfxr()


# ============================================================
# Build Animalese Scene
# ============================================================
def build_animalese():
    """Construct the animalese speech-synth scene and wire up all widgets."""
    scene = mcrfpy.Scene("animalese")
    bg = mcrfpy.Frame(pos=(0, 0), size=(W, H), fill_color=C_BG2)
    scene.children.append(bg)

    # Title
    _cap(bg, 20, 10, "ANIMALESE SPEECH SYNTH", size=16, color=mcrfpy.Color(220, 215, 200))

    # --- Text Display ---
    _cap(bg, 20, 50, "TEXT (type to edit, ENTER to speak):", size=11,
         color=mcrfpy.Color(160, 155, 140))

    # Text input display
    text_frame = mcrfpy.Frame(pos=(20, 70), size=(700, 36),
                              fill_color=mcrfpy.Color(30, 35, 48),
                              outline_color=mcrfpy.Color(100, 110, 130),
                              outline=1.0)
    bg.children.append(text_frame)
    S.text_cap = mcrfpy.Caption(text=S.text + "_", pos=(6, 8),
                                fill_color=mcrfpy.Color(220, 220, 180))
    S.text_cap.font_size = 16
    text_frame.children.append(S.text_cap)

    # Current letter display (large)
    _cap(bg, 740, 50, "NOW:", size=11, color=mcrfpy.Color(160, 155, 140))
    S.letter_cap = mcrfpy.Caption(text="", pos=(740, 68),
                                  fill_color=C_ACCENT)
    S.letter_cap.font_size = 42
    bg.children.append(S.letter_cap)

    # --- Personality Presets ---
    _cap(bg, 20, 120, "CHARACTER PRESETS", size=13,
         color=mcrfpy.Color(200, 195, 180))

    px = 20
    for name in ['CRANKY', 'NORMAL', 'PEPPY', 'LAZY', 'JOCK']:
        _btn(bg, px, 142, 95, 24, name,
             lambda n=name: load_personality(n),
             color=C_BG2_PNL)
        px += 102

    # --- Voice Parameters ---
    _cap(bg, 20, 185, "VOICE PARAMETERS", size=13,
         color=mcrfpy.Color(200, 195, 180))

    sy = 208
    S.anim_sliders['pitch'] = Slider(
        bg, 20, sy, "BASE PITCH", 60, 350, S.base_pitch,
        lambda v: setattr(S, 'base_pitch', v),
        sw=200, lw=110)
    sy += 28
    S.anim_sliders['rate'] = Slider(
        bg, 20, sy, "SPEECH RATE", 4, 24, S.speech_rate,
        lambda v: setattr(S, 'speech_rate', v),
        sw=200, lw=110)
    sy += 28
    S.anim_sliders['jitter'] = Slider(
        bg, 20, sy, "PITCH JITTER", 0, 6, S.pitch_jitter,
        lambda v: setattr(S, 'pitch_jitter', v),
        sw=200, lw=110)
    sy += 28
    S.anim_sliders['breath'] = Slider(
        bg, 20, sy, "BREATHINESS", 0, 1.0, S.breathiness,
        lambda v: setattr(S, 'breathiness', v),
        sw=200, lw=110)

    # --- Speak Button ---
    _btn(bg, 20, sy + 38, 200, 32, "SPEAK", speak_text,
         color=mcrfpy.Color(80, 140, 80), fsize=14)

    # --- Formant Reference ---
    ry = 185
    _cap(bg, 550, ry, "LETTER -> VOWEL MAPPING", size=12,
         color=mcrfpy.Color(180, 175, 160))
    ry += 22
    mappings = [
        ("A H L R", "-> 'ah' (F1=660, F2=1700)"),
        ("E D T S N Z", "-> 'eh' (F1=530, F2=1850)"),
        ("I C J Y", "-> 'ee' (F1=270, F2=2300)"),
        ("O G K Q X", "-> 'oh' (F1=570, F2=870)"),
        ("U B F M P V W", "-> 'oo' (F1=300, F2=870)"),
    ]
    for letters, desc in mappings:
        _cap(bg, 555, ry, letters, size=11,
             color=mcrfpy.Color(200, 180, 120))
        _cap(bg, 680, ry, desc, size=10,
             color=mcrfpy.Color(140, 135, 125))
        ry += 18

    _cap(bg, 555, ry + 8, "Consonants (B,C,D,...) add", size=10,
         color=mcrfpy.Color(120, 115, 105))
    _cap(bg, 555, ry + 22, "a noise burst before the vowel", size=10,
         color=mcrfpy.Color(120, 115, 105))

    # --- How it works ---
    hy = 420
    _cap(bg, 20, hy, "HOW IT WORKS", size=13,
         color=mcrfpy.Color(200, 195, 180))
    steps = [
        "1. Each letter maps to a vowel class (ah/eh/ee/oh/oo)",
        "2. Sawtooth tone at base_pitch filtered through low_pass (formant F1)",
        "3. Noise mixed in for breathiness, burst prepended for consonants",
        "4. Pitch jittered per-letter for natural variation",
        "5. Letters concatenated with overlap for babble effect",
        "6. Light reverb applied for warmth",
    ]
    for i, step in enumerate(steps):
        _cap(bg, 25, hy + 22 + i * 17, step, size=10,
             color=mcrfpy.Color(140, 138, 128))

    # --- Navigation ---
    _btn(bg, 20, H - 50, 200, 28, "<< SFXR SYNTH",
         lambda: setattr(mcrfpy, 'current_scene', S.sfxr_scene),
         color=C_BG2_PNL)

    # --- Keyboard hints ---
    _cap(bg, 250, H - 46, "Type letters to edit text | ENTER = Speak | "
         "1-5 = Presets | TAB = SFXR | ESC = Quit",
         size=10, color=mcrfpy.Color(110, 108, 98))

    # Build key-to-char map for the typing handler.
    key_chars = {}
    for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
        k = getattr(mcrfpy.Key, c, None)
        if k is not None:
            key_chars[k] = c

    # --- Key handler ---
    def on_key(key, action):
        if action != mcrfpy.InputState.PRESSED:
            return
        if key == mcrfpy.Key.ESCAPE:
            sys.exit(0)
        elif key == mcrfpy.Key.TAB:
            mcrfpy.current_scene = S.sfxr_scene
        elif key == mcrfpy.Key.ENTER:
            speak_text()
        elif key == mcrfpy.Key.BACKSPACE:
            if S.text:
                S.text = S.text[:-1]
                S.text_cap.text = S.text + "_"
        elif key == mcrfpy.Key.SPACE:
            S.text += ' '
            S.text_cap.text = S.text + "_"
        elif key == mcrfpy.Key.NUM_1:
            load_personality('CRANKY')
        elif key == mcrfpy.Key.NUM_2:
            load_personality('NORMAL')
        elif key == mcrfpy.Key.NUM_3:
            load_personality('PEPPY')
        elif key == mcrfpy.Key.NUM_4:
            load_personality('LAZY')
        elif key == mcrfpy.Key.NUM_5:
            load_personality('JOCK')
        elif key in key_chars:
            S.text += key_chars[key]
            S.text_cap.text = S.text + "_"

    scene.on_key = on_key
    return scene


# ============================================================
# Main
# ============================================================
S.sfxr_scene = build_sfxr()
S.anim_scene = build_animalese()
mcrfpy.current_scene = S.sfxr_scene
"""Test SoundBuffer composition (concat, mix)."""
import mcrfpy
import sys

# Source material for the composition tests.
tone_a = mcrfpy.SoundBuffer.tone(440, 0.3, "sine")
tone_b = mcrfpy.SoundBuffer.tone(880, 0.2, "sine")
tone_c = mcrfpy.SoundBuffer.tone(660, 0.4, "square")

# Test 1: concat two buffers -> durations add
joined = mcrfpy.SoundBuffer.concat([tone_a, tone_b])
assert joined is not None
expected = tone_a.duration + tone_b.duration
assert abs(joined.duration - expected) < 0.02, f"Expected ~{expected:.3f}s, got {joined.duration:.3f}s"
print(f"PASS: concat([0.3s, 0.2s]) -> {joined.duration:.3f}s")

# Test 2: concat three buffers
joined3 = mcrfpy.SoundBuffer.concat([tone_a, tone_b, tone_c])
expected3 = tone_a.duration + tone_b.duration + tone_c.duration
assert abs(joined3.duration - expected3) < 0.03
print(f"PASS: concat([0.3s, 0.2s, 0.4s]) -> {joined3.duration:.3f}s")

# Test 3: crossfade overlap shortens the result by the overlap amount
xfaded = mcrfpy.SoundBuffer.concat([tone_a, tone_b], overlap=0.05)
expected_xf = tone_a.duration + tone_b.duration - 0.05
assert abs(xfaded.duration - expected_xf) < 0.03, \
    f"Expected ~{expected_xf:.3f}s, got {xfaded.duration:.3f}s"
print(f"PASS: concat with overlap=0.05 -> {xfaded.duration:.3f}s")

# Test 4: mix two buffers (result is padded to the longest input)
blend = mcrfpy.SoundBuffer.mix([tone_a, tone_b])
assert blend is not None
assert abs(blend.duration - max(tone_a.duration, tone_b.duration)) < 0.02
print(f"PASS: mix([0.3s, 0.2s]) -> {blend.duration:.3f}s (padded to longest)")

# Test 5: mix same-duration buffers
half_a = mcrfpy.SoundBuffer.tone(440, 0.5, "sine")
half_b = mcrfpy.SoundBuffer.tone(660, 0.5, "sine")
blend2 = mcrfpy.SoundBuffer.mix([half_a, half_b])
assert abs(blend2.duration - 0.5) < 0.02
print(f"PASS: mix([0.5s, 0.5s]) -> {blend2.duration:.3f}s")

# Test 6: concat of an empty list is rejected
try:
    mcrfpy.SoundBuffer.concat([])
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: concat([]) raises ValueError")

# Test 7: mix of an empty list is rejected
try:
    mcrfpy.SoundBuffer.mix([])
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: mix([]) raises ValueError")

# Test 8: concat rejects non-SoundBuffer elements
try:
    mcrfpy.SoundBuffer.concat([tone_a, "not a buffer"])
    assert False, "Should have raised TypeError"
except TypeError:
    pass
print("PASS: concat with invalid types raises TypeError")

# Test 9: concat of a single buffer returns a copy
solo = mcrfpy.SoundBuffer.concat([tone_a])
assert abs(solo.duration - tone_a.duration) < 0.02
print("PASS: concat single buffer works")

# Test 10: mix of a single buffer returns a copy
solo_mix = mcrfpy.SoundBuffer.mix([tone_a])
assert abs(solo_mix.duration - tone_a.duration) < 0.02
print("PASS: mix single buffer works")

print("\nAll soundbuffer_compose tests passed!")
sys.exit(0)
"""Test SoundBuffer core creation and properties."""
import mcrfpy
import sys
import struct
import math

# Test 1: the SoundBuffer type is exported
assert hasattr(mcrfpy, 'SoundBuffer'), "mcrfpy.SoundBuffer not found"
print("PASS: SoundBuffer type exists")

# Test 2: from_samples factory — one second of mono int16 silence
sample_rate = 44100
channels = 1
num_samples = sample_rate  # 1 second
raw_data = b'\x00\x00' * num_samples  # int16 zeros
silence = mcrfpy.SoundBuffer.from_samples(raw_data, channels, sample_rate)
assert silence is not None
print("PASS: from_samples creates SoundBuffer")

# Test 3: the four core properties reflect the input data
assert abs(silence.duration - 1.0) < 0.01, f"Expected ~1.0s duration, got {silence.duration}"
assert silence.sample_count == num_samples, f"Expected {num_samples} samples, got {silence.sample_count}"
assert silence.sample_rate == sample_rate, f"Expected {sample_rate} rate, got {silence.sample_rate}"
assert silence.channels == channels, f"Expected {channels} channels, got {silence.channels}"
print("PASS: Properties correct (duration, sample_count, sample_rate, channels)")

# Test 4: sfxr_params is only populated for sfxr-generated buffers
assert silence.sfxr_params is None
print("PASS: sfxr_params is None for non-sfxr buffer")

# Test 5: repr mentions the type and duration
r = repr(silence)
assert "SoundBuffer" in r
assert "duration" in r
print(f"PASS: repr = {r}")

# Test 6: from_samples with an actual waveform — 0.5s of 440 Hz sine
num_samples2 = int(sample_rate * 0.5)
wave = [int(32000 * math.sin(2 * math.pi * 440 * i / sample_rate))
        for i in range(num_samples2)]
raw = struct.pack(f'<{num_samples2}h', *wave)
sine_buf = mcrfpy.SoundBuffer.from_samples(raw, 1, 44100)
assert abs(sine_buf.duration - 0.5) < 0.01
print("PASS: from_samples with sine wave data")

# Test 7: stereo input (interleaved, so 2x the bytes per second)
stereo_samples = b'\x00\x00' * (44100 * 2)  # 1 second stereo
stereo_buf = mcrfpy.SoundBuffer.from_samples(stereo_samples, 2, 44100)
assert stereo_buf.channels == 2
assert abs(stereo_buf.duration - 1.0) < 0.01
print("PASS: Stereo from_samples")

print("\nAll soundbuffer_core tests passed!")
sys.exit(0)
"""Test SoundBuffer DSP effects."""
import mcrfpy
import sys

# Shared fixture: 0.5s of 440 Hz sine.
src = mcrfpy.SoundBuffer.tone(440, 0.5, "sine")

# Test 1: pitch_shift — raising pitch shortens, lowering lengthens
higher = src.pitch_shift(2.0)
assert higher is not None
assert higher.sample_count > 0
assert higher.duration < src.duration, f"pitch_shift(2.0) should be shorter: {higher.duration} vs {src.duration}"
print(f"PASS: pitch_shift(2.0) -> {higher.duration:.3f}s (was {src.duration:.3f}s)")

lower = src.pitch_shift(0.5)
assert lower.duration > src.duration, f"pitch_shift(0.5) should be longer: {lower.duration} vs {src.duration}"
print(f"PASS: pitch_shift(0.5) -> {lower.duration:.3f}s")

# Test 2: low_pass keeps the buffer's shape
filtered_lp = src.low_pass(500.0)
assert filtered_lp is not None
assert filtered_lp.sample_count == src.sample_count
assert filtered_lp.duration == src.duration
print("PASS: low_pass preserves sample count and duration")

# Test 3: high_pass keeps the buffer's shape
filtered_hp = src.high_pass(500.0)
assert filtered_hp is not None
assert filtered_hp.sample_count == src.sample_count
print("PASS: high_pass preserves sample count")

# Test 4: echo (same length output)
echoed = src.echo(200.0, 0.4, 0.5)
assert echoed is not None
assert echoed.sample_count == src.sample_count
print("PASS: echo works")

# Test 5: reverb
reverbed = src.reverb(0.8, 0.5, 0.3)
assert reverbed is not None
assert reverbed.sample_count == src.sample_count
print("PASS: reverb works")

# Test 6: distortion
dist = src.distortion(2.0)
assert dist is not None
assert dist.sample_count == src.sample_count
print("PASS: distortion works")

# Test 7: bit_crush
crushed = src.bit_crush(8, 4)
assert crushed is not None
assert crushed.sample_count == src.sample_count
print("PASS: bit_crush works")

# Test 8: normalize
normed = src.normalize()
assert normed is not None
assert normed.sample_count == src.sample_count
print("PASS: normalize works")

# Test 9: reverse
rev = src.reverse()
assert rev is not None
assert rev.sample_count == src.sample_count
print("PASS: reverse preserves sample count")

# Test 10: slice extracts the requested window
sliced = src.slice(0.1, 0.3)
assert sliced is not None
expected_duration = 0.2
assert abs(sliced.duration - expected_duration) < 0.02, f"Expected ~{expected_duration}s, got {sliced.duration}s"
print(f"PASS: slice(0.1, 0.3) -> {sliced.duration:.3f}s")

# Test 11: zero-length slice is safe
empty = src.slice(0.5, 0.5)
assert empty.sample_count == 0
print("PASS: slice with start==end returns empty")

# Test 12: effects return new buffers, so calls can be chained
chained = src.low_pass(1000).distortion(1.5).normalize()
assert chained is not None
assert chained.sample_count > 0
print("PASS: Chaining effects works")

# Test 13: effects never mutate the source buffer
orig_count = src.sample_count
src.pitch_shift(2.0)
assert src.sample_count == orig_count, "Original should not be modified"
print("PASS: Effects don't modify original buffer")

# Test 14: pitch_shift rejects non-positive factors
try:
    src.pitch_shift(-1.0)
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: pitch_shift with negative factor raises ValueError")

print("\nAll soundbuffer_effects tests passed!")
sys.exit(0)
"""Test SoundBuffer sfxr synthesis."""
import mcrfpy
import sys

# Test 1: every named preset produces a non-empty, parameterized buffer
presets = ["coin", "laser", "explosion", "powerup", "hurt", "jump", "blip"]
for preset in presets:
    generated = mcrfpy.SoundBuffer.sfxr(preset)
    assert generated is not None, f"sfxr('{preset}') returned None"
    assert generated.sample_count > 0, f"sfxr('{preset}') has 0 samples"
    assert generated.duration > 0, f"sfxr('{preset}') has 0 duration"
    assert generated.sfxr_params is not None, f"sfxr('{preset}') has no params"
    print(f"  PASS: sfxr('{preset}') -> {generated.duration:.3f}s, {generated.sample_count} samples")

print("PASS: All sfxr presets generate audio")

# Test 2: identical seeds reproduce identical output
seeded_a = mcrfpy.SoundBuffer.sfxr("coin", seed=42)
seeded_b = mcrfpy.SoundBuffer.sfxr("coin", seed=42)
assert seeded_a.sample_count == seeded_b.sample_count, "Same seed should produce same sample count"
assert seeded_a.duration == seeded_b.duration, "Same seed should produce same duration"
print("PASS: Deterministic with seed")

# Test 3: a different seed yields different parameters
seeded_c = mcrfpy.SoundBuffer.sfxr("coin", seed=99)
# Sample counts may coincide by chance, but the params should differ.
params_a = seeded_a.sfxr_params
params_c = seeded_c.sfxr_params
# At least one param should differ (with very high probability).
differs = any(params_a[k] != params_c[k] for k in params_a.keys() if k != 'wave_type')
assert differs, "Different seeds should produce different params"
print("PASS: Different seeds produce different results")

# Test 4: sfxr_params exposes the full parameter set
params = seeded_a.sfxr_params
expected_keys = [
    'wave_type', 'base_freq', 'freq_limit', 'freq_ramp', 'freq_dramp',
    'duty', 'duty_ramp', 'vib_strength', 'vib_speed',
    'env_attack', 'env_sustain', 'env_decay', 'env_punch',
    'lpf_freq', 'lpf_ramp', 'lpf_resonance',
    'hpf_freq', 'hpf_ramp',
    'pha_offset', 'pha_ramp', 'repeat_speed',
    'arp_speed', 'arp_mod'
]
for key in expected_keys:
    assert key in params, f"Missing key '{key}' in sfxr_params"
print("PASS: sfxr_params has all expected keys")

# Test 5: explicit keyword params round-trip through sfxr_params
custom = mcrfpy.SoundBuffer.sfxr(wave_type=2, base_freq=0.5, env_decay=0.3)
assert custom is not None
assert custom.sfxr_params is not None
assert custom.sfxr_params['wave_type'] == 2
assert abs(custom.sfxr_params['base_freq'] - 0.5) < 0.001
print("PASS: sfxr with custom params")

# Test 6: sfxr_mutate produces a playable variant
mutated = seeded_a.sfxr_mutate(0.1)
assert mutated is not None
assert mutated.sfxr_params is not None
assert mutated.sample_count > 0
# Params should be similar but (almost certainly) not identical.
mut_params = mutated.sfxr_params
orig_params = seeded_a.sfxr_params
differs = any(abs(mut_params[k] - orig_params[k]) > 0.0001
              for k in mut_params.keys() if isinstance(mut_params[k], float))
# Note: with a small mutation amount there is a tiny chance nothing moved,
# so this is reported rather than asserted.
print(f"PASS: sfxr_mutate produces {'different' if differs else 'similar'} params")

# Test 7: mutation is reproducible when seeded
mut_a = seeded_a.sfxr_mutate(0.05, 42)
mut_b = seeded_a.sfxr_mutate(0.05, 42)
assert mut_a.sample_count == mut_b.sample_count, "Same seed should produce same mutation"
print("PASS: sfxr_mutate deterministic with seed")

# Test 8: mutating a non-sfxr buffer is an error
tone_buf = mcrfpy.SoundBuffer.tone(440, 0.5, "sine")
try:
    tone_buf.sfxr_mutate(0.1)
    assert False, "Should have raised RuntimeError"
except RuntimeError:
    pass
print("PASS: sfxr_mutate on non-sfxr buffer raises RuntimeError")

# Test 9: unknown preset names are rejected
try:
    mcrfpy.SoundBuffer.sfxr("nonexistent_preset")
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: Invalid preset raises ValueError")

print("\nAll soundbuffer_sfxr tests passed!")
sys.exit(0)
"""Test Sound integration with SoundBuffer."""
import mcrfpy
import sys

# Test 1: Sound() accepts a SoundBuffer argument
src_buf = mcrfpy.SoundBuffer.tone(440, 0.5, "sine")
snd = mcrfpy.Sound(src_buf)
assert snd is not None
print("PASS: Sound(SoundBuffer) works")

# Test 2: the buffer property round-trips
got_buf = snd.buffer
assert got_buf is not None
assert abs(got_buf.duration - src_buf.duration) < 0.02
print("PASS: sound.buffer returns SoundBuffer")

# Test 3: pitch property defaults to 1.0 and is settable
assert snd.pitch == 1.0, f"Default pitch should be 1.0, got {snd.pitch}"
snd.pitch = 1.5
assert abs(snd.pitch - 1.5) < 0.001
snd.pitch = 1.0
print("PASS: sound.pitch get/set")

# Test 4: play_varied (headless mode: just verify no crash)
snd.play_varied(pitch_range=0.1, volume_range=3.0)
print("PASS: sound.play_varied() works")

# Test 5: duration is populated for buffer-created sounds
assert snd.duration > 0
print(f"PASS: Sound from SoundBuffer has duration {snd.duration:.3f}s")

# Test 6: source path is empty for buffer-created sounds
assert snd.source == ""
print("PASS: Sound.source is '' for buffer-created sounds")

# Test 7: backward compatibility — Sound still accepts a filename string.
# The file may not exist; what matters is that a string is accepted (no
# TypeError) and a missing file raises RuntimeError.
file_snd = None
try:
    file_snd = mcrfpy.Sound("test.ogg")
    print("PASS: Sound(str) backward compatible (file loaded)")
except RuntimeError:
    # File doesn't exist - that's fine, the important thing is it accepted a string
    print("PASS: Sound(str) backward compatible (raises RuntimeError for missing file)")

# Test 8: standard playback controls still work for buffer-backed sounds
snd.volume = 75.0
assert abs(snd.volume - 75.0) < 0.1
snd.loop = True
assert snd.loop == True
snd.loop = False
print("PASS: Standard playback controls work with SoundBuffer")

# Test 9: sfxr buffer -> Sound pipeline
sfx = mcrfpy.SoundBuffer.sfxr("coin", seed=42)
coin_sound = mcrfpy.Sound(sfx)
assert coin_sound is not None
assert coin_sound.duration > 0
print(f"PASS: sfxr -> Sound pipeline ({coin_sound.duration:.3f}s)")

# Test 10: effect chain -> Sound pipeline
processed = mcrfpy.SoundBuffer.tone(440, 0.3, "saw").low_pass(2000).normalize()
proc_sound = mcrfpy.Sound(processed)
assert proc_sound is not None
assert proc_sound.duration > 0
print(f"PASS: Effects -> Sound pipeline ({proc_sound.duration:.3f}s)")

# Test 11: wholly invalid argument types are rejected
try:
    mcrfpy.Sound(42)
    assert False, "Should have raised TypeError"
except TypeError:
    pass
print("PASS: Sound(int) raises TypeError")

# Test 12: buffer property is None for file-loaded sounds
if file_snd is not None:
    assert file_snd.buffer is None
    print("PASS: Sound.buffer is None for file-loaded sounds")
else:
    print("PASS: Sound.buffer test skipped (file not available)")

print("\nAll soundbuffer_sound tests passed!")
sys.exit(0)
"""Test SoundBuffer tone generation."""
import mcrfpy
import sys

# Test 1: basic sine tone with default sample rate / mono channel
tone = mcrfpy.SoundBuffer.tone(440, 0.5, "sine")
assert tone is not None
assert abs(tone.duration - 0.5) < 0.02, f"Expected ~0.5s, got {tone.duration}"
assert tone.sample_rate == 44100
assert tone.channels == 1
print("PASS: Sine tone 440Hz 0.5s")

# Test 2: square wave
tone = mcrfpy.SoundBuffer.tone(220, 0.3, "square")
assert abs(tone.duration - 0.3) < 0.02
print("PASS: Square wave")

# Test 3: saw wave
tone = mcrfpy.SoundBuffer.tone(330, 0.2, "saw")
assert abs(tone.duration - 0.2) < 0.02
print("PASS: Saw wave")

# Test 4: triangle wave
tone = mcrfpy.SoundBuffer.tone(550, 0.4, "triangle")
assert abs(tone.duration - 0.4) < 0.02
print("PASS: Triangle wave")

# Test 5: noise
tone = mcrfpy.SoundBuffer.tone(1000, 0.1, "noise")
assert abs(tone.duration - 0.1) < 0.02
print("PASS: Noise")

# Test 6: ADSR envelope parameters are accepted
tone = mcrfpy.SoundBuffer.tone(440, 1.0, "sine",
    attack=0.1, decay=0.2, sustain=0.5, release=0.3)
assert abs(tone.duration - 1.0) < 0.02
print("PASS: ADSR envelope")

# Test 7: custom sample rate is honored
tone = mcrfpy.SoundBuffer.tone(440, 0.5, "sine", sample_rate=22050)
assert tone.sample_rate == 22050
assert abs(tone.duration - 0.5) < 0.02
print("PASS: Custom sample rate")

# Test 8: unknown waveform names are rejected
try:
    mcrfpy.SoundBuffer.tone(440, 0.5, "invalid_waveform")
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: Invalid waveform raises ValueError")

# Test 9: negative durations are rejected
try:
    mcrfpy.SoundBuffer.tone(440, -0.5, "sine")
    assert False, "Should have raised ValueError"
except ValueError:
    pass
print("PASS: Negative duration raises ValueError")

# Test 10: the generator actually emits samples
tone = mcrfpy.SoundBuffer.tone(440, 0.1, "sine")
# In headless mode, sample_count should be nonzero
assert tone.sample_count > 0, "Expected non-zero sample count"
print(f"PASS: Tone has {tone.sample_count} samples")

print("\nAll soundbuffer_tone tests passed!")
sys.exit(0)