From 3fd611ecea46c12c253444675a7573bd1fab9886 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 25 Nov 2025 21:20:05 +0100 Subject: [PATCH] Sync upstream --- LXST/Filters.def | 4 + LXST/Filters.py | 8 +- LXST/Platforms/android/soundcard.py | 78 +- LXST/Platforms/darwin/__init__.py | 0 LXST/Platforms/darwin/coreaudio.h | 261 +++++++ LXST/Platforms/darwin/soundcard.py | 945 +++++++++++++++++++++++ LXST/Platforms/linux/__init__.py | 0 LXST/Platforms/linux/pulseaudio.h | 419 ++++++++++ LXST/Platforms/linux/soundcard.py | 944 ++++++++++++++++++++++ LXST/Platforms/windows/__init__.py | 0 LXST/Platforms/windows/mediafoundation.h | 256 ++++++ LXST/Platforms/windows/soundcard.py | 641 +++++++++++++++ LXST/Primitives/Telephony.py | 33 +- LXST/Sinks.py | 36 +- LXST/Sources.py | 36 +- LXST/_version.py | 2 +- Makefile | 10 +- README.md | 2 +- lib/static/filterlib.dll | Bin 0 -> 104960 bytes setup.py | 10 +- 20 files changed, 3610 insertions(+), 75 deletions(-) create mode 100644 LXST/Filters.def create mode 100644 LXST/Platforms/darwin/__init__.py create mode 100644 LXST/Platforms/darwin/coreaudio.h create mode 100644 LXST/Platforms/darwin/soundcard.py create mode 100644 LXST/Platforms/linux/__init__.py create mode 100644 LXST/Platforms/linux/pulseaudio.h create mode 100644 LXST/Platforms/linux/soundcard.py create mode 100644 LXST/Platforms/windows/__init__.py create mode 100644 LXST/Platforms/windows/mediafoundation.h create mode 100644 LXST/Platforms/windows/soundcard.py create mode 100644 lib/static/filterlib.dll diff --git a/LXST/Filters.def b/LXST/Filters.def new file mode 100644 index 0000000..1ca2e84 --- /dev/null +++ b/LXST/Filters.def @@ -0,0 +1,4 @@ +EXPORTS +highpass_filter +lowpass_filter +agc_process \ No newline at end of file diff --git a/LXST/Filters.py b/LXST/Filters.py index c5c870d..c08ec95 100644 --- a/LXST/Filters.py +++ b/LXST/Filters.py @@ -16,16 +16,16 @@ else: ffi = FFI() try: - # Disable native filterlib loading on Windows - # for now due to strange linking behaviour, - # but allow local compilation if the user has - # a C compiler installed. if not RNS.vendor.platformutils.is_windows(): filterlib_spec = find_spec("LXST.filterlib") if not filterlib_spec or filterlib_spec.origin == None: raise ImportError("Could not locate pre-compiled LXST.filterlib module") with open(os.path.join(c_src_path, "Filters.h"), "r") as f: ffi.cdef(f.read()) native_functions = ffi.dlopen(filterlib_spec.origin) USE_NATIVE_FILTERS = True + else: + with open(os.path.join(c_src_path, "Filters.h"), "r") as f: ffi.cdef(f.read()) + native_functions = ffi.dlopen(os.path.join(c_src_path, "filterlib.dll")) + USE_NATIVE_FILTERS = True except Exception as e: RNS.log(f"Could not load pre-compiled LXST filters library. 
The contained exception was: {e}", RNS.LOG_WARNING) diff --git a/LXST/Platforms/android/soundcard.py b/LXST/Platforms/android/soundcard.py index 2639157..b7e22c9 100644 --- a/LXST/Platforms/android/soundcard.py +++ b/LXST/Platforms/android/soundcard.py @@ -1,3 +1,33 @@ +# Reticulum License +# +# Copyright (c) 2025 Mark Qvist +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# - The Software shall not be used in any kind of system which includes amongst +# its functions the ability to purposefully do harm to human beings. +# +# - The Software shall not be used, directly or indirectly, in the creation of +# an artificial intelligence, machine learning or language model training +# dataset, including but not limited to any use that contributes to the +# training or development of such a model or algorithm. +# +# - The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
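The LXST/Filters.py hunk above enables native filter loading on Windows by reading the C declarations from Filters.h into cffi and dlopen()ing the bundled filterlib.dll (the exported symbols listed in Filters.def are highpass_filter, lowpass_filter and agc_process). A minimal sketch of that cdef/dlopen pattern follows; the header text, function name and library path are hypothetical stand-ins, not the actual contents of Filters.h.

# Sketch of the cffi loading pattern used in Filters.py.
# "example_process" and "./libexample.so" are hypothetical stand-ins;
# the real declarations come from Filters.h and the real binary is
# the platform-specific filterlib (filterlib.dll on Windows).
from cffi import FFI

ffi = FFI()
ffi.cdef("int example_process(float *samples, int count);")
lib = ffi.dlopen("./libexample.so")

samples = ffi.new("float[]", [0.1, 0.2, 0.3])
status = lib.example_process(samples, len(samples))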
+ import atexit import collections.abc import time @@ -81,28 +111,28 @@ class _AndroidAudio: # Populate device type descriptions from JNI self.device_type_descriptions = { - adi.TYPE_AUX_LINE: "Aux Line", # 0x13 - API level 23 - adi.TYPE_BLUETOOTH_A2DP: "Bluetooth A2DP", # 0x08 - API level 23 - adi.TYPE_BLUETOOTH_SCO: "Bluetooth SCO", # 0x07 - API level 23 + adi.TYPE_AUX_LINE: "Aux Line", # 0x13 - API level 23 + adi.TYPE_BLUETOOTH_A2DP: "Bluetooth A2DP", # 0x08 - API level 23 + adi.TYPE_BLUETOOTH_SCO: "Bluetooth SCO", # 0x07 - API level 23 adi.TYPE_BUILTIN_EARPIECE: "Internal Earpiece", # 0x01 - API level 23 - adi.TYPE_BUILTIN_MIC: "Internal Microphone", # 0x0f - API level 23 - adi.TYPE_BUILTIN_SPEAKER: "Internal Speaker", # 0x02 - API level 23 - adi.TYPE_DOCK: "Dock", # 0x0d - API level 23 - adi.TYPE_FM: "FM", # 0x0e - API level 23 - adi.TYPE_FM_TUNER: "FM Tuner", # 0x10 - API level 23 - adi.TYPE_HDMI: "HDMI", # 0x09 - API level 23 - adi.TYPE_HDMI_ARC: "HDMI ARC", # 0x0a - API level 23 - adi.TYPE_IP: "IP", # 0x14 - API level 23 - adi.TYPE_LINE_ANALOG: "Analog Line", # 0x05 - API level 23 - adi.TYPE_LINE_DIGITAL: "Digital Line", # 0x06 - API level 23 - adi.TYPE_TELEPHONY: "Telephony", # 0x12 - API level 23 - adi.TYPE_TV_TUNER: "TV Tuner", # 0x11 - API level 23 - adi.TYPE_UNKNOWN: "Unknown", # 0x00 - API level 23 - adi.TYPE_USB_ACCESSORY: "USB Accessory", # 0x0c - API level 23 - adi.TYPE_USB_DEVICE: "USB Device", # 0x0b - API level 23 - adi.TYPE_WIRED_HEADPHONES: "Wired Headphones", # 0x04 - API level 23 - adi.TYPE_WIRED_HEADSET: "Wired Headset", # 0x03 - API level 23 - adi.TYPE_BUS: "Bus", # 0x15 - API level 24 + adi.TYPE_BUILTIN_MIC: "Internal Microphone", # 0x0f - API level 23 + adi.TYPE_BUILTIN_SPEAKER: "Internal Speaker", # 0x02 - API level 23 + adi.TYPE_DOCK: "Dock", # 0x0d - API level 23 + adi.TYPE_FM: "FM", # 0x0e - API level 23 + adi.TYPE_FM_TUNER: "FM Tuner", # 0x10 - API level 23 + adi.TYPE_HDMI: "HDMI", # 0x09 - API level 23 + adi.TYPE_HDMI_ARC: "HDMI ARC", # 0x0a - API level 23 + adi.TYPE_IP: "IP", # 0x14 - API level 23 + adi.TYPE_LINE_ANALOG: "Analog Line", # 0x05 - API level 23 + adi.TYPE_LINE_DIGITAL: "Digital Line", # 0x06 - API level 23 + adi.TYPE_TELEPHONY: "Telephony", # 0x12 - API level 23 + adi.TYPE_TV_TUNER: "TV Tuner", # 0x11 - API level 23 + adi.TYPE_UNKNOWN: "Unknown", # 0x00 - API level 23 + adi.TYPE_USB_ACCESSORY: "USB Accessory", # 0x0c - API level 23 + adi.TYPE_USB_DEVICE: "USB Device", # 0x0b - API level 23 + adi.TYPE_WIRED_HEADPHONES: "Wired Headphones", # 0x04 - API level 23 + adi.TYPE_WIRED_HEADSET: "Wired Headset", # 0x03 - API level 23 + adi.TYPE_BUS: "Bus", # 0x15 - API level 24 } if self.android_api_version >= 26: @@ -115,9 +145,9 @@ class _AndroidAudio: self.device_type_descriptions[adi.TYPE_BUILTIN_SPEAKER_SAFE] = "Ringer Speaker" # 0x18 - API level 30 if self.android_api_version >= 31: - self.device_type_descriptions[adi.TYPE_BLE_HEADSET] = "BLE Headset" # 0x1a - API level 31 - self.device_type_descriptions[adi.TYPE_BLE_SPEAKER] = "BLE Speaker" # 0x1b - API level 31 - self.device_type_descriptions[adi.TYPE_HDMI_EARC] = "HDMI EARC" # 0x1d - API level 31 + self.device_type_descriptions[adi.TYPE_BLE_HEADSET] = "BLE Headset" # 0x1a - API level 31 + self.device_type_descriptions[adi.TYPE_BLE_SPEAKER] = "BLE Speaker" # 0x1b - API level 31 + self.device_type_descriptions[adi.TYPE_HDMI_EARC] = "HDMI EARC" # 0x1d - API level 31 self.device_type_descriptions[adi.TYPE_REMOTE_SUBMIX] = "Remote Submix" # 0x19 - API level 31 if self.android_api_version >= 33: diff 
--git a/LXST/Platforms/darwin/__init__.py b/LXST/Platforms/darwin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/LXST/Platforms/darwin/coreaudio.h b/LXST/Platforms/darwin/coreaudio.h new file mode 100644 index 0000000..cd9824f --- /dev/null +++ b/LXST/Platforms/darwin/coreaudio.h @@ -0,0 +1,261 @@ +// All files are found in /System/Library/Frameworks + +// CoreFoundation/CFBase.h: +typedef unsigned char Boolean; +typedef unsigned char UInt8; +typedef signed char SInt8; +typedef unsigned short UInt16; +typedef signed short SInt16; +typedef unsigned int UInt32; +typedef signed int SInt32; +typedef uint64_t UInt64; +typedef int64_t SInt64; +typedef SInt32 OSStatus; +typedef float Float32; +typedef double Float64; +typedef unsigned short UniChar; +typedef unsigned long UniCharCount; +typedef unsigned char * StringPtr; +typedef const unsigned char * ConstStringPtr; +typedef unsigned char Str255[256]; +typedef const unsigned char * ConstStr255Param; +typedef SInt16 OSErr; +typedef SInt16 RegionCode; +typedef SInt16 LangCode; +typedef SInt16 ScriptCode; +typedef UInt32 FourCharCode; +typedef FourCharCode OSType; +typedef UInt8 Byte; +typedef SInt8 SignedByte; +typedef UInt32 UTF32Char; +typedef UInt16 UTF16Char; +typedef UInt8 UTF8Char; +typedef signed long long CFIndex; +typedef const void * CFStringRef; + +// CoreFoundation/CFString.h +typedef UInt32 CFStringEncoding; +CFIndex CFStringGetLength(CFStringRef theString); +Boolean CFStringGetCString(CFStringRef theString, char *buffer, CFIndex bufferSize, CFStringEncoding encoding); + +// CoreFoundation/CFRunLoop.h +typedef struct __CFRunLoop * CFRunLoopRef; + +// CoreAudio/AudioHardwareBase.h +typedef UInt32 AudioObjectID; +typedef UInt32 AudioObjectPropertySelector; +typedef UInt32 AudioObjectPropertyScope; +typedef UInt32 AudioObjectPropertyElement; +struct AudioObjectPropertyAddress +{ + AudioObjectPropertySelector mSelector; + AudioObjectPropertyScope mScope; + AudioObjectPropertyElement mElement; +}; +typedef struct AudioObjectPropertyAddress AudioObjectPropertyAddress; + +// CoreAudio/AudioHardware.h +Boolean AudioObjectHasProperty(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress); +OSStatus AudioObjectGetPropertyDataSize(AudioObjectID inObjectID, + const AudioObjectPropertyAddress* inAddress, + UInt32 inQualifierDataSize, + const void* inQualifierData, + UInt32* outDataSize); +OSStatus AudioObjectGetPropertyData(AudioObjectID inObjectID, + const AudioObjectPropertyAddress* inAddress, + UInt32 inQualifierDataSize, + const void* inQualifierData, + UInt32* ioDataSize, + void* outData); +OSStatus AudioObjectSetPropertyData(AudioObjectID inObjectID, + const AudioObjectPropertyAddress* inAddress, + UInt32 inQualifierDataSize, + const void* inQualifierData, + UInt32 inDataSize, + const void* inData); + + +// CoreAudioTypes.h +typedef UInt32 AudioFormatID; +typedef UInt32 AudioFormatFlags; +struct AudioStreamBasicDescription +{ + Float64 mSampleRate; + AudioFormatID mFormatID; + AudioFormatFlags mFormatFlags; + UInt32 mBytesPerPacket; + UInt32 mFramesPerPacket; + UInt32 mBytesPerFrame; + UInt32 mChannelsPerFrame; + UInt32 mBitsPerChannel; + UInt32 mReserved; +}; +typedef struct AudioStreamBasicDescription AudioStreamBasicDescription; +struct AudioStreamPacketDescription +{ + SInt64 mStartOffset; + UInt32 mVariableFramesInPacket; + UInt32 mDataByteSize; +}; +typedef struct AudioStreamPacketDescription AudioStreamPacketDescription; + +// AudioToolbox/AudioQueue.h + +// data structures: + +struct SMPTETime +{ 
+ SInt16 mSubframes; + SInt16 mSubframeDivisor; + UInt32 mCounter; + UInt32 mType; + UInt32 mFlags; + SInt16 mHours; + SInt16 mMinutes; + SInt16 mSeconds; + SInt16 mFrames; +}; +typedef struct SMPTETime SMPTETime; +struct AudioTimeStamp +{ + Float64 mSampleTime; + UInt64 mHostTime; + Float64 mRateScalar; + UInt64 mWordClockTime; + SMPTETime mSMPTETime; + UInt32 mFlags; + UInt32 mReserved; +}; +typedef struct AudioTimeStamp AudioTimeStamp; + +// AudioComponent.h + +typedef struct AudioComponentDescription { + OSType componentType; + OSType componentSubType; + OSType componentManufacturer; + UInt32 componentFlags; + UInt32 componentFlagsMask; +} AudioComponentDescription; +typedef struct OpaqueAudioComponent * AudioComponent; +typedef struct ComponentInstanceRecord * AudioComponentInstance; +AudioComponent AudioComponentFindNext(AudioComponent inComponent, + const AudioComponentDescription *inDesc); +OSStatus AudioComponentInstanceNew(AudioComponent inComponent, + AudioComponentInstance *outInstance); +OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance); +OSStatus AudioComponentCopyName(AudioComponent inComponent, + CFStringRef *outName); +OSStatus AudioComponentGetDescription(AudioComponent inComponent, + AudioComponentDescription *outDesc); + +// AUComponent.h + +typedef AudioComponentInstance AudioUnit; +typedef UInt32 AudioUnitPropertyID; +typedef UInt32 AudioUnitScope; +typedef UInt32 AudioUnitElement; + +OSStatus AudioUnitInitialize(AudioUnit inUnit); +OSStatus AudioUnitGetPropertyInfo(AudioUnit inUnit, + AudioUnitPropertyID inID, + AudioUnitScope inScope, + AudioUnitElement inElement, + UInt32 *outDataSize, + Boolean *outWritable); +OSStatus AudioUnitGetProperty(AudioUnit inUnit, + AudioUnitPropertyID inID, + AudioUnitScope inScope, + AudioUnitElement inElement, + void *outData, + UInt32 *ioDataSize); +OSStatus AudioUnitSetProperty(AudioUnit inUnit, + AudioUnitPropertyID inID, + AudioUnitScope inScope, + AudioUnitElement inElement, + const void *inData, + UInt32 inDataSize); + +OSStatus AudioOutputUnitStart(AudioUnit ci); +OSStatus AudioOutputUnitStop(AudioUnit ci); + +typedef UInt32 AudioUnitRenderActionFlags; + +struct AudioBuffer +{ + UInt32 mNumberChannels; + UInt32 mDataByteSize; + void* mData; +}; +typedef struct AudioBuffer AudioBuffer; + +struct AudioBufferList +{ + UInt32 mNumberBuffers; + AudioBuffer mBuffers[]; // this is a variable length array of mNumberBuffers elements +}; +typedef struct AudioBufferList AudioBufferList; + +OSStatus AudioUnitProcess(AudioUnit inUnit, + AudioUnitRenderActionFlags * ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inNumberFrames, + AudioBufferList *ioData); +OSStatus AudioUnitRender(AudioUnit inUnit, + AudioUnitRenderActionFlags * ioActionFlags, + const AudioTimeStamp * inTimeStamp, + UInt32 inOutputBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData); + +typedef OSStatus (*AURenderCallback)(void * inRefCon, + AudioUnitRenderActionFlags *ioActionFlags, + const AudioTimeStamp *inTimeStamp, + UInt32 inBusNumber, + UInt32 inNumberFrames, + AudioBufferList *ioData); + +typedef struct AURenderCallbackStruct { + AURenderCallback inputProc; + void *inputProcRefCon; +} AURenderCallbackStruct; + +struct AudioValueRange +{ + Float64 mMinimum; + Float64 mMaximum; +}; +typedef struct AudioValueRange AudioValueRange; + + +// AudioConverter.h +typedef struct OpaqueAudioConverter * AudioConverterRef; +typedef UInt32 AudioConverterPropertyID; + +OSStatus AudioConverterNew(const AudioStreamBasicDescription 
*inSourceFormat, + const AudioStreamBasicDescription *inDestinationFormat, + AudioConverterRef *outAudioConverter); +OSStatus AudioConverterDispose(AudioConverterRef inAudioConverter); +typedef OSStatus (*AudioConverterComplexInputDataProc)( + AudioConverterRef inAudioConverter, + UInt32 *ioNumberDataPackets, + AudioBufferList *ioData, + AudioStreamPacketDescription **outDataPacketDescription, + void *inUserData); +extern OSStatus AudioConverterFillComplexBuffer( + AudioConverterRef inAudioConverter, + AudioConverterComplexInputDataProc inInputDataProc, + void *inInputDataProcUserData, + UInt32 *ioOutputDataPacketSize, + AudioBufferList *outOutputData, + AudioStreamPacketDescription *outPacketDescription); +extern OSStatus AudioConverterSetProperty( + AudioConverterRef inAudioConverter, + AudioConverterPropertyID inPropertyID, + UInt32 inPropertyDataSize, + const void *inPropertyData); +extern OSStatus AudioConverterGetProperty( + AudioConverterRef inAudioConverter, + AudioConverterPropertyID inPropertyID, + UInt32 *ioPropertyDataSize, + void *outPropertyData); diff --git a/LXST/Platforms/darwin/soundcard.py b/LXST/Platforms/darwin/soundcard.py new file mode 100644 index 0000000..a9ed662 --- /dev/null +++ b/LXST/Platforms/darwin/soundcard.py @@ -0,0 +1,945 @@ +# Adapted from Bastian Bechtold's soundcard library, originally released +# under the BSD 3-Clause License +# +# https://github.com/bastibe/SoundCard +# +# Copyright (c) 2016 Bastian Bechtold +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Modifications and improvements Copyright 2025 Mark Qvist, and released +# under the same BSD 3-Clause License. 
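This CoreAudio backend mirrors the device-enumeration and streaming API of the other platform backends defined below: all_speakers()/all_microphones() and default_speaker()/default_microphone() for discovery, plus player()/recorder() context managers for streaming. A rough usage sketch, assuming the module is importable as LXST.Platforms.darwin.soundcard; the sample rate, channel count and frame count are arbitrary illustrative values:

# Usage sketch (assumed import path; values are illustrative only).
from LXST.Platforms.darwin import soundcard

mic = soundcard.default_microphone()
spk = soundcard.default_speaker()

# Record one second of mono audio at 48 kHz, then play it back.
with mic.recorder(samplerate=48000, channels=1) as rec:
    data = rec.record(numframes=48000)

with spk.player(samplerate=48000, channels=1) as out:
    out.play(data)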
+ +import os +import cffi +import numpy +import collections +import time +import re +import math +import threading +import warnings + +_ffi = cffi.FFI() +_package_dir, _ = os.path.split(__file__) +with open(os.path.join(_package_dir, 'coreaudio.h'), 'rt') as f: + _ffi.cdef(f.read()) + +_ca = _ffi.dlopen('CoreAudio') +_au = _ffi.dlopen('AudioUnit') + +from soundcard import coreaudioconstants as _cac + + +def all_speakers(): + """A list of all connected speakers.""" + device_ids = _CoreAudio.get_property( + _cac.kAudioObjectSystemObject, + _cac.kAudioHardwarePropertyDevices, + "AudioObjectID") + return [_Speaker(id=d) for d in device_ids + if _Speaker(id=d).channels > 0] + + +def all_microphones(include_loopback=False): + """A list of all connected microphones.""" + + # macOS does not support loopback recording functionality + if include_loopback: + warnings.warn("macOS does not support loopback recording functionality", Warning) + + device_ids = _CoreAudio.get_property( + _cac.kAudioObjectSystemObject, + _cac.kAudioHardwarePropertyDevices, + "AudioObjectID") + return [_Microphone(id=d) for d in device_ids + if _Microphone(id=d).channels > 0] + + +def default_speaker(): + """The default speaker of the system.""" + device_id, = _CoreAudio.get_property( + _cac.kAudioObjectSystemObject, + _cac.kAudioHardwarePropertyDefaultOutputDevice, + "AudioObjectID") + return _Speaker(id=device_id) + + +def get_speaker(id): + """Get a specific speaker by a variety of means. + + id can be an a CoreAudio id, a substring of the speaker name, or a + fuzzy-matched pattern for the speaker name. + + """ + return _match_device(id, all_speakers()) + + +def default_microphone(): + """The default microphone of the system.""" + device_id, = _CoreAudio.get_property( + _cac.kAudioObjectSystemObject, + _cac.kAudioHardwarePropertyDefaultInputDevice, + "AudioObjectID") + return _Microphone(id=device_id) + + +def get_microphone(id, include_loopback=False): + """Get a specific microphone by a variety of means. + + id can be a CoreAudio id, a substring of the microphone name, or a + fuzzy-matched pattern for the microphone name. + + """ + return _match_device(id, all_microphones(include_loopback)) + + +def _match_device(id, devices): + """Find id in a list of devices. + + id can be a CoreAudio id, a substring of the device name, or a + fuzzy-matched pattern for the microphone name. + + """ + devices_by_id = {device.id: device for device in devices} + devices_by_name = {device.name: device for device in devices} + if id in devices_by_id: + return devices_by_id[id] + # try substring match: + for name, device in devices_by_name.items(): + if id in name: + return device + # try fuzzy match: + pattern = '.*'.join(id) + for name, device in devices_by_name.items(): + if re.match(pattern, name): + return device + raise IndexError('no device with id {}'.format(id)) + + +def get_name(): + raise NotImplementedError() + + +def set_name(name): + raise NotImplementedError() + + +class _Soundcard: + """A soundcard. This is meant to be subclassed. + + Properties: + - `name`: the name of the soundcard + + """ + def __init__(self, *, id): + self._id = id + + @property + def id(self): + return self._id + + @property + def name(self): + name = _CoreAudio.get_property( + self._id, _cac.kAudioObjectPropertyName, 'CFStringRef') + return _CoreAudio.CFString_to_str(name) + + +class _Speaker(_Soundcard): + """A soundcard output. Can be used to play audio. 
+
+    Use the `play` method to play one piece of audio, or use the
+    `player` method to get a context manager for playing continuous
+    audio.
+
+    Properties:
+    - `channels`: either the number of channels to play, or a list
+      of channel indices. Index -1 is silence, and subsequent numbers
+      are channel numbers (left, right, center, ...)
+    - `name`: the name of the soundcard
+
+    """
+
+    @property
+    def channels(self):
+        bufferlist = _CoreAudio.get_property(
+            self._id,
+            _cac.kAudioDevicePropertyStreamConfiguration,
+            'AudioBufferList', scope=_cac.kAudioObjectPropertyScopeOutput)
+        if bufferlist and bufferlist[0].mNumberBuffers > 0:
+            return bufferlist[0].mBuffers[0].mNumberChannels
+        else:
+            return 0
+
+    def __repr__(self):
+        return '<Speaker {} ({} channels)>'.format(self.name, self.channels)
+
+    def player(self, samplerate, channels=None, blocksize=None):
+        if channels is None:
+            channels = self.channels
+        return _Player(self._id, samplerate, channels, blocksize)
+
+    def play(self, data, samplerate, channels=None, blocksize=None):
+        if channels is None and len(data.shape) == 2:
+            channels = data.shape[1]
+        elif channels is None:
+            channels = self.channels
+        with self.player(samplerate, channels, blocksize) as p:
+            p.play(data)
+
+
+class _Microphone(_Soundcard):
+    """A soundcard input. Can be used to record audio.
+
+    Use the `record` method to record a piece of audio, or use the
+    `recorder` method to get a context manager for recording
+    continuous audio.
+
+    Properties:
+    - `channels`: either the number of channels to record, or a list
+      of channel indices. Index -1 is silence, and subsequent numbers
+      are channel numbers (left, right, center, ...)
+    - `name`: the name of the soundcard
+
+    """
+
+    @property
+    def isloopback(self):
+        return False
+
+    @property
+    def channels(self):
+        bufferlist = _CoreAudio.get_property(
+            self._id,
+            _cac.kAudioDevicePropertyStreamConfiguration,
+            'AudioBufferList', scope=_cac.kAudioObjectPropertyScopeInput)
+        if bufferlist and bufferlist[0].mNumberBuffers > 0:
+            return bufferlist[0].mBuffers[0].mNumberChannels
+        else:
+            return 0
+
+    def __repr__(self):
+        return '<Microphone {} ({} channels)>'.format(self.name, self.channels)
+
+    def recorder(self, samplerate, channels=None, blocksize=None):
+        if channels is None:
+            channels = self.channels
+        return _Recorder(self._id, samplerate, channels, blocksize)
+
+    def record(self, numframes, samplerate, channels=None, blocksize=None):
+        if channels is None:
+            channels = self.channels
+        with self.recorder(samplerate, channels, blocksize) as p:
+            return p.record(numframes)
+
+
+class _CoreAudio:
+    """A helper class for interacting with CoreAudio."""
+
+    @staticmethod
+    def get_property(target, selector, ctype, scope=_cac.kAudioObjectPropertyScopeGlobal):
+        """Get a CoreAudio property.
+
+        This might include things like a list of available sound
+        cards, or various meta data about those sound cards.
+ + Arguments: + - `target`: The AudioObject that the property belongs to + - `selector`: The Selector for this property + - `scope`: The Scope for this property + - `ctype`: The type of the property + + Returns: + A list of objects of type `ctype` + + """ + + prop = _ffi.new("AudioObjectPropertyAddress*", + {'mSelector': selector, + 'mScope': scope, + 'mElement': _cac.kAudioObjectPropertyElementMaster}) + + has_prop = _ca.AudioObjectHasProperty(target, prop) + assert has_prop == 1, 'Core Audio does not have the requested property' + + size = _ffi.new("UInt32*") + err = _ca.AudioObjectGetPropertyDataSize(target, prop, 0, _ffi.NULL, size) + assert err == 0, "Can't get Core Audio property size" + num_values = int(size[0]//_ffi.sizeof(ctype)) + + prop_data = _ffi.new(ctype+'[]', num_values) + err = _ca.AudioObjectGetPropertyData(target, prop, 0, _ffi.NULL, + size, prop_data) + assert err == 0, "Can't get Core Audio property data" + return prop_data + + @staticmethod + def set_property(target, selector, prop_data, scope=_cac.kAudioObjectPropertyScopeGlobal): + """Set a CoreAudio property. + + This is typically a piece of meta data about a sound card. + + Arguments: + - `target`: The AudioObject that the property belongs to + - `selector`: The Selector for this property + - `scope`: The Scope for this property + - `prop_data`: The new property value + + """ + + prop = _ffi.new("AudioObjectPropertyAddress*", + {'mSelector': selector, + 'mScope': scope, + 'mElement': _cac.kAudioObjectPropertyElementMaster}) + + err = _ca.AudioObjectSetPropertyData(target, prop, 0, _ffi.NULL, + _ffi.sizeof(_ffi.typeof(prop_data).item.cname), prop_data) + assert err == 0, "Can't set Core Audio property data" + + @staticmethod + def CFString_to_str(cfstrptr): + """Converts a CFStringRef to a Python str.""" + + # Multiply by 4, the maximum number of bytes used per character in UTF-8. + str_length = _ca.CFStringGetLength(cfstrptr[0]) * 4 + str_buffer = _ffi.new('char[]', str_length+1) + + err = _ca.CFStringGetCString(cfstrptr[0], str_buffer, str_length+1, _cac.kCFStringEncodingUTF8) + assert err == 1, "Could not decode string" + + return _ffi.string(str_buffer).decode() + + +class _Player: + """A context manager for an active output stream. + + Audio playback is available as soon as the context manager is + entered. Audio data can be played using the `play` method. + Successive calls to `play` will queue up the audio one piece after + another. If no audio is queued up, this will play silence. + + This context manager can only be entered once, and can not be used + after it is closed. 
+ + """ + + def __init__(self, id, samplerate, channels, blocksize=None): + self._au = _AudioUnit("output", id, samplerate, channels, blocksize) + + def __enter__(self): + self._queue = collections.deque() + + @_ffi.callback("AURenderCallback") + def render_callback(userdata, actionflags, timestamp, + busnumber, numframes, bufferlist): + for bufferidx in range(bufferlist.mNumberBuffers): + dest = bufferlist.mBuffers[bufferidx] + channels = dest.mNumberChannels + bytes_written = 0 + to_write = dest.mDataByteSize + while bytes_written < to_write: + if self._queue: + data = self._queue.popleft() + srcbuffer = _ffi.from_buffer(data) + numbytes = min(len(srcbuffer), to_write-bytes_written) + _ffi.memmove(dest.mData+bytes_written, srcbuffer, numbytes) + if numbytes < len(srcbuffer): + leftover = data[numbytes//4//channels:] + self._queue.appendleft(leftover) + bytes_written += numbytes + else: + src = bytearray(to_write-bytes_written) + _ffi.memmove(dest.mData+bytes_written, src, len(src)) + bytes_written += len(src) + return 0 + + self._au.set_callback(render_callback) + + self._au.start() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._au.close() + + def play(self, data, wait=True): + """Play some audio data. + + Internally, all data is handled as float32 and with the + appropriate number of channels. For maximum performance, + provide data as a `frames × channels` float32 numpy array. + + If single-channel or one-dimensional data is given, this data + will be played on all available channels. + + This function will return *before* all data has been played, + so that additional data can be provided for gapless playback. + The amount of buffering can be controlled through the + blocksize of the player object. + + If data is provided faster than it is played, later pieces + will be queued up and played one after another. + + """ + + data = numpy.asarray(data, dtype="float32", order='C') + data[data>1] = 1 + data[data<-1] = -1 + if data.ndim == 1: + data = data[:, None] # force 2d + if data.ndim != 2: + raise TypeError('data must be 1d or 2d, not {}d'.format(data.ndim)) + if data.shape[1] == 1 and self._au.channels != 1: + data = numpy.tile(data, [1, self._au.channels]) + if data.shape[1] != self._au.channels: + raise TypeError('second dimension of data must be equal to the number of channels, not {}'.format(data.shape[1])) + idx = 0 + while idx < len(data)-self._au.blocksize: + self._queue.append(data[idx:idx+self._au.blocksize]) + idx += self._au.blocksize + self._queue.append(data[idx:]) + while self._queue and wait: + time.sleep(0.001) + +class _AudioUnit: + """Communication helper with AudioUnits. + + This provides an abstraction over a single AudioUnit. Can be used + as soon as it instatiated. + + Properties: + - `enableinput`, `enableoutput`: set up the AudioUnit for playback + or recording. It is not possible to record and play at the same + time. + - `device`: The numeric ID of the underlying CoreAudio device. + - `blocksize`: The amount of buffering in the AudioUnit. Values + outside of `blocksizerange` will be silently clamped to that + range. + - `blocksizerange`: The minimum and maximum possible block size. + - `samplerate`: The sampling rate of the CoreAudio device. This + will lead to errors if changed in a recording AudioUnit. + - `channels`: The number of channels of the AudioUnit. 
+ + """ + + def __init__(self, iotype, device, samplerate, channels, blocksize): + self._iotype = iotype + + desc = _ffi.new( + "AudioComponentDescription*", + dict(componentType=_cac.kAudioUnitType_Output, + componentSubType=_cac.kAudioUnitSubType_HALOutput, + componentFlags=0, + componentFlagsMask=0, + componentManufacturer=_cac.kAudioUnitManufacturer_Apple)) + + audiocomponent = _au.AudioComponentFindNext(_ffi.NULL, desc) + if not audiocomponent: + raise RuntimeError("could not find audio component") + self.ptr = _ffi.new("AudioComponentInstance*") + status = _au.AudioComponentInstanceNew(audiocomponent, self.ptr) + if status: + raise RuntimeError(_cac.error_number_to_string(status)) + + if iotype == 'input': + self.enableinput = True + self.enableoutput = False + self._au_scope = _cac.kAudioUnitScope_Output + self._au_element = 1 + elif iotype == 'output': + self.enableinput = False + self.enableoutput = True + self._au_scope = _cac.kAudioUnitScope_Input + self._au_element = 0 + + self.device = device + + blocksize = blocksize or self.blocksize + + # Input AudioUnits can't use non-native sample rates. + # Therefore, if a non-native sample rate is requested, use a + # resampled block size and resample later, manually: + if iotype == 'input': + # Get the input device format + curr_device_format = self._get_property(_cac.kAudioUnitProperty_StreamFormat, + _cac.kAudioUnitScope_Input, + 1, + "AudioStreamBasicDescription") + + self.samplerate = curr_device_format[0].mSampleRate + self.resample = self.samplerate/samplerate + else: + self.resample = 1 + self.samplerate = samplerate + + # there are two maximum block sizes for some reason: + maxblocksize = min(self.blocksizerange[1], + self.maxblocksize) + if self.blocksizerange[0] <= blocksize <= maxblocksize: + self.blocksize = blocksize + else: + raise TypeError("blocksize must be between {} and {}" + .format(self.blocksizerange[0], + maxblocksize)) + + if isinstance(channels, collections.abc.Iterable): + if iotype == 'output': + # invert channel map and fill with -1 ([2, 0] -> [1, -1, 0]): + self.channels = len([c for c in channels if c >= 0]) + channelmap = [-1]*(max(channels)+1) + for idx, c in enumerate(channels): + channelmap[c] = idx + self.channelmap = channelmap + else: + self.channels = len(channels) + self.channelmap = channels + elif isinstance(channels, int): + self.channels = channels + else: + raise TypeError('channels must be iterable or integer') + self._set_channels(self.channels) + + def _set_property(self, property, scope, element, data): + if '[]' in _ffi.typeof(data).cname: + num_values = len(data) + else: + num_values = 1 + status = _au.AudioUnitSetProperty(self.ptr[0], + property, scope, element, + data, _ffi.sizeof(_ffi.typeof(data).item.cname)*num_values) + if status != 0: + raise RuntimeError(_cac.error_number_to_string(status)) + + def _get_property(self, property, scope, element, type): + datasize = _ffi.new("UInt32*") + status = _au.AudioUnitGetPropertyInfo(self.ptr[0], + property, scope, element, + datasize, _ffi.NULL) + num_values = datasize[0]//_ffi.sizeof(type) + data = _ffi.new(type + '[{}]'.format(num_values)) + status = _au.AudioUnitGetProperty(self.ptr[0], + property, scope, element, + data, datasize) + if status != 0: + raise RuntimeError(_cac.error_number_to_string(status)) + # return trivial data trivially + if num_values == 1 and (type == "UInt32" or type == "Float64"): + return data[0] + else: # everything else, return the cdata, to keep it alive + return data + + @property + def device(self): + 
return self._get_property( + _cac.kAudioOutputUnitProperty_CurrentDevice, + _cac.kAudioUnitScope_Global, 0, "UInt32") + + @device.setter + def device(self, dev): + data = _ffi.new("UInt32*", dev) + self._set_property( + _cac.kAudioOutputUnitProperty_CurrentDevice, + _cac.kAudioUnitScope_Global, 0, data) + + @property + def enableinput(self): + return self._get_property( + _cac.kAudioOutputUnitProperty_EnableIO, + _cac.kAudioUnitScope_Input, 1, "UInt32") + + @enableinput.setter + def enableinput(self, yesno): + data = _ffi.new("UInt32*", yesno) + self._set_property( + _cac.kAudioOutputUnitProperty_EnableIO, + _cac.kAudioUnitScope_Input, 1, data) + + @property + def enableoutput(self): + return self._get_property( + _cac.kAudioOutputUnitProperty_EnableIO, + _cac.kAudioUnitScope_Output, 0, "UInt32") + + @enableoutput.setter + def enableoutput(self, yesno): + data = _ffi.new("UInt32*", yesno) + self._set_property( + _cac.kAudioOutputUnitProperty_EnableIO, + _cac.kAudioUnitScope_Output, 0, data) + + @property + def samplerate(self): + return self._get_property( + _cac.kAudioUnitProperty_SampleRate, + self._au_scope, self._au_element, "Float64") + + @samplerate.setter + def samplerate(self, samplerate): + data = _ffi.new("Float64*", samplerate) + self._set_property( + _cac.kAudioUnitProperty_SampleRate, + self._au_scope, self._au_element, data) + + def _set_channels(self, channels): + streamformat = _ffi.new( + "AudioStreamBasicDescription*", + dict(mSampleRate=self.samplerate, + mFormatID=_cac.kAudioFormatLinearPCM, + mFormatFlags=_cac.kAudioFormatFlagIsFloat, + mFramesPerPacket=1, + mChannelsPerFrame=channels, + mBitsPerChannel=32, + mBytesPerPacket=channels * 4, + mBytesPerFrame=channels * 4)) + self._set_property( + _cac.kAudioUnitProperty_StreamFormat, + self._au_scope, self._au_element, streamformat) + + @property + def maxblocksize(self): + maxblocksize = self._get_property( + _cac.kAudioUnitProperty_MaximumFramesPerSlice, + _cac.kAudioUnitScope_Global, 0, "UInt32") + assert maxblocksize + return maxblocksize + + @property + def channelmap(self): + scope = {2: 1, 1: 2}[self._au_scope] + map = self._get_property( + _cac.kAudioOutputUnitProperty_ChannelMap, + scope, self._au_element, + "SInt32") + last_meaningful = max(idx for idx, c in enumerate(map) if c != -1) + return list(map[0:last_meaningful+1]) + + @channelmap.setter + def channelmap(self, map): + scope = {2: 1, 1: 2}[self._au_scope] + cmap = _ffi.new("SInt32[]", map) + self._set_property( + _cac.kAudioOutputUnitProperty_ChannelMap, + scope, self._au_element, + cmap) + + @property + def blocksizerange(self): + framesizerange = _CoreAudio.get_property( + self.device, + _cac.kAudioDevicePropertyBufferFrameSizeRange, + 'AudioValueRange', scope=_cac.kAudioObjectPropertyScopeOutput) + assert framesizerange + return framesizerange[0].mMinimum, framesizerange[0].mMaximum + + @property + def blocksize(self): + framesize = _CoreAudio.get_property( + self.device, + _cac.kAudioDevicePropertyBufferFrameSize, + 'UInt32', scope=_cac.kAudioObjectPropertyScopeOutput) + assert framesize + return framesize[0] + + @blocksize.setter + def blocksize(self, blocksize): + framesize = _ffi.new("UInt32*", blocksize) + status = _CoreAudio.set_property( + self.device, + _cac.kAudioDevicePropertyBufferFrameSize, + framesize, scope=_cac.kAudioObjectPropertyScopeOutput) + + def set_callback(self, callback): + """Set a callback function for the AudioUnit. 
""" + + if self._iotype == 'input': + callbacktype = _cac.kAudioOutputUnitProperty_SetInputCallback + elif self._iotype == 'output': + callbacktype = _cac.kAudioUnitProperty_SetRenderCallback + + self._callback = callback + callbackstruct = _ffi.new( + "AURenderCallbackStruct*", + dict(inputProc=callback, + inputProcRefCon=_ffi.NULL)) + self._set_property( + callbacktype, + _cac.kAudioUnitScope_Global, 0, callbackstruct) + + def start(self): + """Start processing audio, and start calling the callback.""" + + status = _au.AudioUnitInitialize(self.ptr[0]) + if status: + raise RuntimeError(_cac.error_number_to_string(status)) + status = _au.AudioOutputUnitStart(self.ptr[0]) + if status: + raise RuntimeError(_cac.error_number_to_string(status)) + + def close(self): + """Stop processing audio, and stop calling the callback.""" + + status = _au.AudioOutputUnitStop(self.ptr[0]) + if status: + raise RuntimeError(_cac.error_number_to_string(status)) + status = _au.AudioComponentInstanceDispose(self.ptr[0]) + if status: + raise RuntimeError(_cac.error_number_to_string(status)) + del self.ptr + + +# Here's how to do it: http://atastypixel.com/blog/using-remoteio-audio-unit/ +# https://developer.apple.com/library/content/technotes/tn2091/_index.html + + +class _Resampler: + def __init__(self, fromsamplerate, tosamplerate, channels): + self.fromsamplerate = fromsamplerate + self.tosamplerate = tosamplerate + self.channels = channels + + fromstreamformat = _ffi.new( + "AudioStreamBasicDescription*", + dict(mSampleRate=self.fromsamplerate, + mFormatID=_cac.kAudioFormatLinearPCM, + mFormatFlags=_cac.kAudioFormatFlagIsFloat, + mFramesPerPacket=1, + mChannelsPerFrame=self.channels, + mBitsPerChannel=32, + mBytesPerPacket=self.channels * 4, + mBytesPerFrame=self.channels * 4)) + + tostreamformat = _ffi.new( + "AudioStreamBasicDescription*", + dict(mSampleRate=self.tosamplerate, + mFormatID=_cac.kAudioFormatLinearPCM, + mFormatFlags=_cac.kAudioFormatFlagIsFloat, + mFramesPerPacket=1, + mChannelsPerFrame=self.channels, + mBitsPerChannel=32, + mBytesPerPacket=self.channels * 4, + mBytesPerFrame=self.channels * 4)) + + self.audioconverter = _ffi.new("AudioConverterRef*") + _au.AudioConverterNew(fromstreamformat, tostreamformat, self.audioconverter) + + @_ffi.callback("AudioConverterComplexInputDataProc") + def converter_callback(converter, numberpackets, bufferlist, desc, userdata): + return self.converter_callback(converter, numberpackets, bufferlist, desc, userdata) + self._converter_callback = converter_callback + + self.queue = [] + + self.blocksize = 512 + self.outbuffer = _ffi.new("AudioBufferList*", [1, 1]) + self.outbuffer.mNumberBuffers = 1 + self.outbuffer.mBuffers[0].mNumberChannels = self.channels + self.outbuffer.mBuffers[0].mDataByteSize = self.blocksize*4*self.channels + self.outdata = _ffi.new("Float32[]", self.blocksize*self.channels) + self.outbuffer.mBuffers[0].mData = self.outdata + self.outsize = _ffi.new("UInt32*") + + def converter_callback(self, converter, numberpackets, bufferlist, desc, userdata): + numframes = min(numberpackets[0], len(self.todo), self.blocksize) + raw_data = self.todo[:numframes].tobytes() + _ffi.memmove(self.outdata, raw_data, len(raw_data)) + bufferlist[0].mBuffers[0].mDataByteSize = len(raw_data) + bufferlist[0].mBuffers[0].mData = self.outdata + numberpackets[0] = numframes + self.todo = self.todo[numframes:] + + if len(self.todo) == 0 and numframes == 0: + return -1 + return 0 + + def resample(self, data): + self.todo = numpy.array(data, dtype='float32') + while 
len(self.todo) > 0: + self.outsize[0] = self.blocksize + # Set outbuffer each iteration to avoid mDataByteSize decreasing over time + self.outbuffer.mNumberBuffers = 1 + self.outbuffer.mBuffers[0].mNumberChannels = self.channels + self.outbuffer.mBuffers[0].mDataByteSize = self.blocksize*4*self.channels + self.outbuffer.mBuffers[0].mData = self.outdata + + status = _au.AudioConverterFillComplexBuffer(self.audioconverter[0], + self._converter_callback, + _ffi.NULL, + self.outsize, + self.outbuffer, + _ffi.NULL) + + if status != 0 and status != -1: + raise RuntimeError('error during sample rate conversion:', status) + + array = numpy.frombuffer(_ffi.buffer(self.outdata), dtype='float32').copy() + + self.queue.append(array[:self.outsize[0]*self.channels]) + + converted_data = numpy.concatenate(self.queue) + self.queue.clear() + + return converted_data.reshape([-1, self.channels]) + + def __del__(self): + _au.AudioConverterDispose(self.audioconverter[0]) + + +class _Recorder: + """A context manager for an active input stream. + + Audio recording is available as soon as the context manager is + entered. Recorded audio data can be read using the `record` + method. If no audio data is available, `record` will block until + the requested amount of audio data has been recorded. + + This context manager can only be entered once, and can not be used + after it is closed. + + """ + + def __init__(self, id, samplerate, channels, blocksize=None): + self._au = _AudioUnit("input", id, samplerate, channels, blocksize) + self._resampler = _Resampler(self._au.samplerate, samplerate, self._au.channels) + self._record_event = threading.Event() + + def __enter__(self): + self._queue = collections.deque() + self._pending_chunk = numpy.zeros([0, self._au.channels], dtype='float32') + + channels = self._au.channels + au = self._au.ptr[0] + + @_ffi.callback("AURenderCallback") + def input_callback(userdata, actionflags, timestamp, + busnumber, numframes, bufferlist): + bufferlist = _ffi.new("AudioBufferList*", [1, 1]) + bufferlist.mNumberBuffers = 1 + bufferlist.mBuffers[0].mNumberChannels = channels + bufferlist.mBuffers[0].mDataByteSize = numframes * 4 * channels + bufferlist.mBuffers[0].mData = _ffi.NULL + + status = _au.AudioUnitRender(au, + actionflags, + timestamp, + busnumber, + numframes, + bufferlist) + + # special case if output is silence: + if (actionflags[0] == _cac.kAudioUnitRenderAction_OutputIsSilence + and status == _cac.kAudioUnitErr_CannotDoInCurrentContext): + actionflags[0] = 0 # reset actionflags + status = 0 # reset error code + data = numpy.zeros([numframes, channels], 'float32') + else: + data = numpy.frombuffer(_ffi.buffer(bufferlist.mBuffers[0].mData, + bufferlist.mBuffers[0].mDataByteSize), + dtype='float32') + data = data.reshape([-1, bufferlist.mBuffers[0].mNumberChannels]).copy() + + if status != 0: + print('error during recording:', status) + + self._queue.append(data) + self._record_event.set() + return status + + self._au.set_callback(input_callback) + self._au.start() + + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._au.close() + + def _record_chunk(self): + """Record one chunk of audio data, as returned by core audio + + The data will be returned as a 1D numpy array, which will be used by + the `record` method. This function is the interface of the `_Recorder` + object with core audio. 
+ """ + while not self._queue: + self._record_event.wait() + self._record_event.clear() + block = self._queue.popleft() + + # perform sample rate conversion: + if self._au.resample != 1: + block = self._resampler.resample(block) + return block + + def record(self, numframes=None): + """Record a block of audio data. + + The data will be returned as a frames × channels float32 numpy array. + This function will wait until numframes frames have been recorded. + If numframes is given, it will return exactly `numframes` frames, + and buffer the rest for later. + + If numframes is None, it will return whatever the audio backend + has available right now. + Use this if latency must be kept to a minimum, but be aware that + block sizes can change at the whims of the audio backend. + + If using `record` with `numframes=None` after using `record` with a + required `numframes`, the last buffered frame will be returned along + with the new recorded block. + (If you want to empty the last buffered frame instead, use `flush`) + + """ + + if numframes is None: + blocks = [self._pending_chunk, self._record_chunk()] + self._pending_chunk = numpy.zeros([0, self._au.channels], dtype='float32') + else: + blocks = [self._pending_chunk] + self._pending_chunk = numpy.zeros([0, self._au.channels], dtype='float32') + recorded_frames = len(blocks[0]) + while recorded_frames < numframes: + block = self._record_chunk() + blocks.append(block) + recorded_frames += len(block) + if recorded_frames > numframes: + to_split = -(recorded_frames-numframes) + blocks[-1], self._pending_chunk = numpy.split(blocks[-1], [to_split]) + data = numpy.concatenate(blocks, axis=0) + + return data + + def flush(self): + """Return the last pending chunk + After using the record method, this will return the last incomplete + chunk and delete it. 
+ + """ + last_chunk = numpy.reshape(self._pending_chunk, [-1, self._au.channels]) + self._pending_chunk = numpy.zeros([0, self._au.channels], dtype='float32') + return last_chunk diff --git a/LXST/Platforms/linux/__init__.py b/LXST/Platforms/linux/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/LXST/Platforms/linux/pulseaudio.h b/LXST/Platforms/linux/pulseaudio.h new file mode 100644 index 0000000..2d82ee9 --- /dev/null +++ b/LXST/Platforms/linux/pulseaudio.h @@ -0,0 +1,419 @@ + +typedef enum pa_stream_direction { + PA_STREAM_NODIRECTION, + PA_STREAM_PLAYBACK, + PA_STREAM_RECORD, + PA_STREAM_UPLOAD +} pa_stream_direction_t; + +typedef enum pa_sample_format { + PA_SAMPLE_U8, + PA_SAMPLE_ALAW, + PA_SAMPLE_ULAW, + PA_SAMPLE_S16LE, + PA_SAMPLE_S16BE, + PA_SAMPLE_FLOAT32LE, + PA_SAMPLE_FLOAT32BE, + PA_SAMPLE_S32LE, + PA_SAMPLE_S32BE, + PA_SAMPLE_S24LE, + PA_SAMPLE_S24BE, + PA_SAMPLE_S24_32LE, + PA_SAMPLE_S24_32BE, + PA_SAMPLE_MAX, + PA_SAMPLE_INVALID = -1 +} pa_sample_format_t; + +typedef struct pa_sample_spec { + pa_sample_format_t format; + uint32_t rate; + uint8_t channels; +} pa_sample_spec; + +typedef enum pa_channel_position { + PA_CHANNEL_POSITION_INVALID = -1, + PA_CHANNEL_POSITION_MONO = 0, + + PA_CHANNEL_POSITION_FRONT_LEFT, + PA_CHANNEL_POSITION_FRONT_RIGHT, + PA_CHANNEL_POSITION_FRONT_CENTER, + PA_CHANNEL_POSITION_LEFT = PA_CHANNEL_POSITION_FRONT_LEFT, + PA_CHANNEL_POSITION_RIGHT = PA_CHANNEL_POSITION_FRONT_RIGHT, + PA_CHANNEL_POSITION_CENTER = PA_CHANNEL_POSITION_FRONT_CENTER, + PA_CHANNEL_POSITION_REAR_CENTER, + PA_CHANNEL_POSITION_REAR_LEFT, + PA_CHANNEL_POSITION_REAR_RIGHT, + PA_CHANNEL_POSITION_LFE, + PA_CHANNEL_POSITION_SUBWOOFER = PA_CHANNEL_POSITION_LFE, + PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER, + PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER, + PA_CHANNEL_POSITION_SIDE_LEFT, + PA_CHANNEL_POSITION_SIDE_RIGHT, + PA_CHANNEL_POSITION_AUX0, + PA_CHANNEL_POSITION_AUX1, + PA_CHANNEL_POSITION_AUX2, + PA_CHANNEL_POSITION_AUX3, + PA_CHANNEL_POSITION_AUX4, + PA_CHANNEL_POSITION_AUX5, + PA_CHANNEL_POSITION_AUX6, + PA_CHANNEL_POSITION_AUX7, + PA_CHANNEL_POSITION_AUX8, + PA_CHANNEL_POSITION_AUX9, + PA_CHANNEL_POSITION_AUX10, + PA_CHANNEL_POSITION_AUX11, + PA_CHANNEL_POSITION_AUX12, + PA_CHANNEL_POSITION_AUX13, + PA_CHANNEL_POSITION_AUX14, + PA_CHANNEL_POSITION_AUX15, + PA_CHANNEL_POSITION_AUX16, + PA_CHANNEL_POSITION_AUX17, + PA_CHANNEL_POSITION_AUX18, + PA_CHANNEL_POSITION_AUX19, + PA_CHANNEL_POSITION_AUX20, + PA_CHANNEL_POSITION_AUX21, + PA_CHANNEL_POSITION_AUX22, + PA_CHANNEL_POSITION_AUX23, + PA_CHANNEL_POSITION_AUX24, + PA_CHANNEL_POSITION_AUX25, + PA_CHANNEL_POSITION_AUX26, + PA_CHANNEL_POSITION_AUX27, + PA_CHANNEL_POSITION_AUX28, + PA_CHANNEL_POSITION_AUX29, + PA_CHANNEL_POSITION_AUX30, + PA_CHANNEL_POSITION_AUX31, + + PA_CHANNEL_POSITION_TOP_CENTER, + PA_CHANNEL_POSITION_TOP_FRONT_LEFT, + PA_CHANNEL_POSITION_TOP_FRONT_RIGHT, + PA_CHANNEL_POSITION_TOP_FRONT_CENTER, + PA_CHANNEL_POSITION_TOP_REAR_LEFT, + PA_CHANNEL_POSITION_TOP_REAR_RIGHT, + PA_CHANNEL_POSITION_TOP_REAR_CENTER, + PA_CHANNEL_POSITION_MAX +} pa_channel_position_t; + +#define PA_CHANNELS_MAX 32U + +typedef struct pa_channel_map { + uint8_t channels; + pa_channel_position_t map[PA_CHANNELS_MAX]; +} pa_channel_map; + +typedef enum pa_channel_map_def { + PA_CHANNEL_MAP_AIFF, + PA_CHANNEL_MAP_ALSA, + PA_CHANNEL_MAP_AUX, + PA_CHANNEL_MAP_WAVEEX, + PA_CHANNEL_MAP_OSS, + PA_CHANNEL_MAP_DEF_MAX, + PA_CHANNEL_MAP_DEFAULT = PA_CHANNEL_MAP_AIFF +} pa_channel_map_def_t; + +pa_channel_map* 
pa_channel_map_init_extend(pa_channel_map *m, unsigned channels, pa_channel_map_def_t def); +int pa_channel_map_valid(const pa_channel_map *map); +const char* pa_channel_position_to_string(pa_channel_position_t pos); + +typedef struct pa_buffer_attr { + uint32_t maxlength; + uint32_t tlength; + uint32_t prebuf; + uint32_t minreq; + uint32_t fragsize; +} pa_buffer_attr; + +typedef struct pa_simple pa_simple; + +pa_simple* pa_simple_new( + const char *server, + const char *name, + pa_stream_direction_t dir, + const char *dev, + const char *stream_name, + const pa_sample_spec *ss, + const pa_channel_map *map, + const pa_buffer_attr *attr, + int *error + ); + +typedef struct pa_mainloop pa_mainloop; +pa_mainloop *pa_mainloop_new(void); +void pa_mainloop_free(pa_mainloop* m); +int pa_mainloop_run(pa_mainloop *m, int *retval); +void pa_mainloop_quit(pa_mainloop *m, int retval); + +typedef struct pa_threaded_mainloop pa_threaded_mainloop; +pa_threaded_mainloop *pa_threaded_mainloop_new(void); +int pa_threaded_mainloop_start(pa_threaded_mainloop *m); +void pa_threaded_mainloop_stop(pa_threaded_mainloop *m); +void pa_threaded_mainloop_free(pa_threaded_mainloop *m); +void pa_threaded_mainloop_lock(pa_threaded_mainloop *m); +void pa_threaded_mainloop_unlock(pa_threaded_mainloop *m); + +typedef struct pa_mainloop_api pa_mainloop_api; +pa_mainloop_api* pa_mainloop_get_api(pa_mainloop*m); +pa_mainloop_api *pa_threaded_mainloop_get_api(pa_threaded_mainloop *m); + +typedef struct pa_context pa_context; +pa_context *pa_context_new(pa_mainloop_api *mainloop, const char *name); +void pa_context_unref(pa_context *c); +typedef enum pa_context_flags {PA_CONTEXT_NOFLAGS = 0} pa_context_flags_t; +typedef struct pa_spawn_api pa_spawn_api; +int pa_context_connect(pa_context *c, const char *server, pa_context_flags_t flags, const pa_spawn_api *api); +void pa_context_disconnect(pa_context *c); +int pa_context_errno(const pa_context *c); +typedef enum pa_context_state { + PA_CONTEXT_UNCONNECTED, + PA_CONTEXT_CONNECTING, + PA_CONTEXT_AUTHORIZING, + PA_CONTEXT_SETTING_NAME, + PA_CONTEXT_READY, + PA_CONTEXT_FAILED, + PA_CONTEXT_TERMINATED +} pa_context_state_t; +pa_context_state_t pa_context_get_state(pa_context *c); + +typedef struct pa_operation pa_operation; +pa_operation *pa_operation_ref(pa_operation *o); +void pa_operation_unref(pa_operation *o); +typedef enum pa_operation_state { + PA_OPERATION_RUNNING, + PA_OPERATION_DONE, + PA_OPERATION_CANCELLED +} pa_operation_state_t; +pa_operation_state_t pa_operation_get_state(pa_operation *o); + +typedef enum pa_sink_state { /* enum serialized in u8 */ + PA_SINK_INVALID_STATE = -1, + PA_SINK_RUNNING = 0, + PA_SINK_IDLE = 1, + PA_SINK_SUSPENDED = 2 +} pa_sink_state_t; + +typedef struct pa_proplist pa_proplist; +const char *pa_proplist_gets(pa_proplist *p, const char *key); + +typedef enum pa_encoding { + PA_ENCODING_ANY, + PA_ENCODING_PCM, + PA_ENCODING_AC3_IEC61937, + PA_ENCODING_EAC3_IEC61937, + PA_ENCODING_MPEG_IEC61937, + PA_ENCODING_DTS_IEC61937, + PA_ENCODING_MPEG2_AAC_IEC61937, + PA_ENCODING_MAX, + PA_ENCODING_INVALID = -1, +} pa_encoding_t; + +typedef struct pa_format_info { + pa_encoding_t encoding; + pa_proplist *plist; +} pa_format_info; + +typedef struct pa_sink_port_info { + const char *name; + const char *description; + uint32_t priority; + int available; +} pa_sink_port_info; + +typedef uint32_t pa_volume_t; +typedef struct pa_cvolume { + uint8_t channels; + pa_volume_t values[PA_CHANNELS_MAX]; +} pa_cvolume; + +typedef uint64_t pa_usec_t; + +typedef enum 
pa_sink_flags { + PA_SINK_NOFLAGS = 0x0000, + PA_SINK_HW_VOLUME_CTRL = 0x0001, + PA_SINK_LATENCY = 0x0002, + PA_SINK_HARDWARE = 0x0004, + PA_SINK_NETWORK = 0x0008, + PA_SINK_HW_MUTE_CTRL = 0x0010, + PA_SINK_DECIBEL_VOLUME = 0x0020, + PA_SINK_FLAT_VOLUME = 0x0040, + PA_SINK_DYNAMIC_LATENCY = 0x0080, + PA_SINK_SET_FORMATS = 0x0100 +} pa_sink_flags_t; + +typedef struct pa_sink_info { + const char *name; + uint32_t index; + const char *description; + pa_sample_spec sample_spec; + pa_channel_map channel_map; + uint32_t owner_module; + pa_cvolume volume; + int mute; + uint32_t monitor_source; + const char *monitor_source_name; + pa_usec_t latency; + const char *driver; + pa_sink_flags_t flags; + pa_proplist *proplist; + pa_usec_t configured_latency; + pa_volume_t base_volume; + pa_sink_state_t state; + uint32_t n_volume_steps; + uint32_t card; + uint32_t n_ports; + pa_sink_port_info** ports; + pa_sink_port_info* active_port; + uint8_t n_formats; + pa_format_info **formats; +} pa_sink_info; + +typedef struct pa_source_port_info { + const char *name; + const char *description; + uint32_t priority; + int available; +} pa_source_port_info; + +typedef enum pa_source_flags { + PA_SOURCE_NOFLAGS = 0x0000, + PA_SOURCE_HW_VOLUME_CTRL = 0x0001, + PA_SOURCE_LATENCY = 0x0002, + PA_SOURCE_HARDWARE = 0x0004, + PA_SOURCE_NETWORK = 0x0008, + PA_SOURCE_HW_MUTE_CTRL = 0x0010, + PA_SOURCE_DECIBEL_VOLUME = 0x0020, + PA_SOURCE_DYNAMIC_LATENCY = 0x0040, + PA_SOURCE_FLAT_VOLUME = 0x0080 +} pa_source_flags_t; + +typedef enum pa_source_state { + PA_SOURCE_INVALID_STATE = -1, + PA_SOURCE_RUNNING = 0, + PA_SOURCE_IDLE = 1, + PA_SOURCE_SUSPENDED = 2 +} pa_source_state_t; + +typedef struct pa_source_info { + const char *name; + uint32_t index; + const char *description; + pa_sample_spec sample_spec; + pa_channel_map channel_map; + uint32_t owner_module; + pa_cvolume volume; + int mute; + uint32_t monitor_of_sink; + const char *monitor_of_sink_name; + pa_usec_t latency; + const char *driver; + pa_source_flags_t flags; // + pa_proplist *proplist; + pa_usec_t configured_latency; + pa_volume_t base_volume; + pa_source_state_t state; // + uint32_t n_volume_steps; + uint32_t card; + uint32_t n_ports; + pa_source_port_info** ports; + pa_source_port_info* active_port; + uint8_t n_formats; + pa_format_info **formats; +} pa_source_info; + +typedef void (*pa_sink_info_cb_t)(pa_context *c, const pa_sink_info *i, int eol, void *userdata); +pa_operation* pa_context_get_sink_info_list(pa_context *c, pa_sink_info_cb_t cb, void *userdata); +pa_operation* pa_context_get_sink_info_by_name(pa_context *c, const char *name, pa_sink_info_cb_t cb, void *userdata); +typedef void (*pa_source_info_cb_t)(pa_context *c, const pa_source_info *i, int eol, void *userdata); +pa_operation* pa_context_get_source_info_list(pa_context *c, pa_source_info_cb_t cb, void *userdata); +pa_operation* pa_context_get_source_info_by_name(pa_context *c, const char *name, pa_source_info_cb_t cb, void *userdata); +typedef void (*pa_context_notify_cb)(pa_context *c, void *userdata); +pa_operation* pa_context_drain(pa_context *c, pa_context_notify_cb cb, void *userdata); +typedef void (*pa_context_success_cb_t)(pa_context *c, int success, void *userdata); +pa_operation* pa_context_set_name(pa_context *c, const char *name, pa_context_success_cb_t cb, void *userdata); +uint32_t pa_context_get_index(const pa_context *s); + +typedef struct pa_client_info { + uint32_t index; + const char *name; + uint32_t owner_module; + const char *driver; + pa_proplist *proplist; +} 
pa_client_info; +typedef void (*pa_client_info_cb_t) (pa_context *c, const pa_client_info*i, int eol, void *userdata); +pa_operation* pa_context_get_client_info(pa_context *c, uint32_t idx, pa_client_info_cb_t cb, void *userdata); + +typedef struct pa_server_info { + const char *user_name; + const char *host_name; + const char *server_version; + const char *server_name; + pa_sample_spec sample_spec; + const char *default_sink_name; + const char *default_source_name; + uint32_t cookie; + pa_channel_map channel_map; +} pa_server_info; +typedef void (*pa_server_info_cb_t) (pa_context *c, const pa_server_info*i, void *userdata); +pa_operation* pa_context_get_server_info(pa_context *c, pa_server_info_cb_t cb, void *userdata); + +int pa_sample_spec_valid(const pa_sample_spec *spec); + +typedef struct pa_stream pa_stream; +pa_stream* pa_stream_new(pa_context *c, const char *name, const pa_sample_spec *ss, const pa_channel_map *map); +void pa_stream_unref(pa_stream *s); + +typedef enum pa_stream_flags { + PA_STREAM_NOFLAGS = 0x0000, + PA_STREAM_START_CORKED = 0x0001, + PA_STREAM_INTERPOLATE_TIMING = 0x0002, + PA_STREAM_NOT_MONOTONIC = 0x0004, + PA_STREAM_AUTO_TIMING_UPDATE = 0x0008, + PA_STREAM_NO_REMAP_CHANNELS = 0x0010, + PA_STREAM_NO_REMIX_CHANNELS = 0x0020, + PA_STREAM_FIX_FORMAT = 0x0040, + PA_STREAM_FIX_RATE = 0x0080, + PA_STREAM_FIX_CHANNELS = 0x0100, + PA_STREAM_DONT_MOVE = 0x0200, + PA_STREAM_VARIABLE_RATE = 0x0400, + PA_STREAM_PEAK_DETECT = 0x0800, + PA_STREAM_START_MUTED = 0x1000, + PA_STREAM_ADJUST_LATENCY = 0x2000, + PA_STREAM_EARLY_REQUESTS = 0x4000, + PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND = 0x8000, + PA_STREAM_START_UNMUTED = 0x10000, + PA_STREAM_FAIL_ON_SUSPEND = 0x20000, + PA_STREAM_RELATIVE_VOLUME = 0x40000, + PA_STREAM_PASSTHROUGH = 0x80000 +} pa_stream_flags_t; +int pa_stream_connect_playback(pa_stream *s, const char *dev, const pa_buffer_attr *attr, pa_stream_flags_t flags, const pa_cvolume *volume, pa_stream *sync_stream); +int pa_stream_connect_record(pa_stream *s, const char *dev, const pa_buffer_attr *attr, pa_stream_flags_t flags); +int pa_stream_disconnect(pa_stream *s); +typedef void (*pa_stream_success_cb_t) (pa_stream*s, int success, void *userdata); +pa_operation* pa_stream_cork(pa_stream *s, int b, pa_stream_success_cb_t cb, void *userdata); +pa_operation* pa_stream_drain(pa_stream *s, pa_stream_success_cb_t cb, void *userdata); +size_t pa_stream_writable_size(pa_stream *p); +size_t pa_stream_readable_size(pa_stream *p); +typedef void (*pa_free_cb_t)(void *p); +typedef enum pa_seek_mode { + PA_SEEK_RELATIVE = 0, + PA_SEEK_ABSOLUTE = 1, + PA_SEEK_RELATIVE_ON_READ = 2, + PA_SEEK_RELATIVE_END = 3 +} pa_seek_mode_t; +int pa_stream_write(pa_stream *p, const void *data, size_t nbytes, pa_free_cb_t free_cb, int64_t offset, pa_seek_mode_t seek); +int pa_stream_peek(pa_stream *p, const void **data, size_t *nbytes); +int pa_stream_drop(pa_stream *p); +int pa_stream_get_latency(pa_stream *s, pa_usec_t *r_usec, int *negative); +const pa_channel_map* pa_stream_get_channel_map(pa_stream *s); +const pa_buffer_attr* pa_stream_get_buffer_attr(pa_stream *s); + +typedef enum pa_stream_state { + PA_STREAM_UNCONNECTED, + PA_STREAM_CREATING, + PA_STREAM_READY, + PA_STREAM_FAILED, + PA_STREAM_TERMINATED +} pa_stream_state_t; +pa_stream_state_t pa_stream_get_state(pa_stream *p); + +typedef void(*pa_stream_request_cb_t)(pa_stream *p, size_t nbytes, void *userdata); +void pa_stream_set_read_callback(pa_stream *p, pa_stream_request_cb_t cb, void *userdata); + +pa_operation* 
pa_stream_update_timing_info(pa_stream *s, pa_stream_success_cb_t cb, void *userdata); diff --git a/LXST/Platforms/linux/soundcard.py b/LXST/Platforms/linux/soundcard.py new file mode 100644 index 0000000..d628e69 --- /dev/null +++ b/LXST/Platforms/linux/soundcard.py @@ -0,0 +1,944 @@ +# Adapted from Bastian Bechtold's soundcard library, originally released +# under the BSD 3-Clause License +# +# https://github.com/bastibe/SoundCard +# +# Copyright (c) 2016 Bastian Bechtold +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Modifications and improvements Copyright 2025 Mark Qvist, and released +# under the same BSD 3-Clause License. + +import os +import atexit +import collections.abc +import time +import re +import threading +import warnings +import numpy +import cffi + +_ffi = cffi.FFI() +_package_dir, _ = os.path.split(__file__) +with open(os.path.join(_package_dir, "pulseaudio.h"), "rt") as f: _ffi.cdef(f.read()) + +# Try explicit file name, if the general does not work (e.g. on nixos) +try: _pa = _ffi.dlopen("pulse") +except OSError: _pa = _ffi.dlopen("libpulse.so") + +# First, we need to define a global _PulseAudio proxy for interacting +# with the C API: + +def _lock(func): + """Call a pulseaudio function while holding the mainloop lock.""" + def func_with_lock(*args, **kwargs): + self = args[0] + with self._lock_mainloop(): + return func(*args[1:], **kwargs) + return func_with_lock + + +def _lock_and_block(func): + """Call a pulseaudio function while holding the mainloop lock, and + block until the operation has finished. + + Use this for pulseaudio functions that return a `pa_operation *`. + + """ + def func_with_lock(*args, **kwargs): + self = args[0] + with self._lock_mainloop(): + operation = func(*args[1:], **kwargs) + self._block_operation(operation) + self._pa_operation_unref(operation) + return func_with_lock + + +def channel_name_map(): + """ + Return a dict containing the channel position index for every channel position name string. 
+ """ + + channel_indices = { + _ffi.string(_pa.pa_channel_position_to_string(idx)).decode("utf-8"): idx for idx in + range(_pa.PA_CHANNEL_POSITION_MAX) + } + + # Append alternative names for front-left, front-right, front-center and lfe according to + # the PulseAudio definitions. + channel_indices.update({"left": _pa.PA_CHANNEL_POSITION_LEFT, + "right": _pa.PA_CHANNEL_POSITION_RIGHT, + "center": _pa.PA_CHANNEL_POSITION_CENTER, + "subwoofer": _pa.PA_CHANNEL_POSITION_SUBWOOFER}) + + # The values returned from Pulseaudio contain 1 for "left", 2 for "right" and so on. + # SoundCard"s channel indices for "left" start at 0. Therefore, we have to decrement all values. + channel_indices = {key: value - 1 for (key, value) in channel_indices.items()} + + return channel_indices + + +class _PulseAudio: + """Proxy for communcation with Pulseaudio. + + This holds the pulseaudio main loop, and a pulseaudio context. + Together, these provide the building blocks for interacting with + pulseaudio. + + This can be used to query the pulseaudio server for sources, + sinks, and server information, and provides thread-safe access to + the main pulseaudio functions. + + Any function that would return a `pa_operation *` in pulseaudio + will block until the operation has finished. + + """ + + def __init__(self): + # these functions are called before the mainloop starts, so we + # don't need to hold the lock: + self.mainloop = _pa.pa_threaded_mainloop_new() + self.mainloop_api = _pa.pa_threaded_mainloop_get_api(self.mainloop) + self.context = _pa.pa_context_new(self.mainloop_api, self._infer_program_name().encode()) + _pa.pa_context_connect(self.context, _ffi.NULL, _pa.PA_CONTEXT_NOFLAGS, _ffi.NULL) + _pa.pa_threaded_mainloop_start(self.mainloop) + + while self._pa_context_get_state(self.context) in (_pa.PA_CONTEXT_UNCONNECTED, _pa.PA_CONTEXT_CONNECTING, _pa.PA_CONTEXT_AUTHORIZING, _pa.PA_CONTEXT_SETTING_NAME): + time.sleep(0.001) + assert self._pa_context_get_state(self.context)==_pa.PA_CONTEXT_READY + + @staticmethod + def _infer_program_name(): + """Get current progam name. + + Will handle `./script.py`, `python path/to/script.py`, + `python -m module.submodule` and `python -c "code(x=y)"`. + See https://docs.python.org/3/using/cmdline.html#interface-options + """ + import sys + prog_name = sys.argv[0] + if prog_name == "-c": + return sys.argv[1][:30] + "..." 
+ if prog_name == "-m": + prog_name = sys.argv[1] + # Usually even with -m, sys.argv[0] will already be a path, + # so do the following outside the above check + main_str = "/__main__.py" + if prog_name.endswith(main_str): + prog_name = prog_name[:-len(main_str)] + # Not handled: sys.argv[0] == "-" + return os.path.basename(prog_name) + + def _shutdown(self): + operation = self._pa_context_drain(self.context, _ffi.NULL, _ffi.NULL) + self._block_operation(operation) + self._pa_context_disconnect(self.context) + self._pa_context_unref(self.context) + # no more mainloop locking necessary from here on: + _pa.pa_threaded_mainloop_stop(self.mainloop) + _pa.pa_threaded_mainloop_free(self.mainloop) + + def _block_operation(self, operation): + """Wait until the operation has finished.""" + if operation == _ffi.NULL: + return + while self._pa_operation_get_state(operation) == _pa.PA_OPERATION_RUNNING: + time.sleep(0.001) + + @property + def name(self): + """Return application name stored in client proplist""" + idx = self._pa_context_get_index(self.context) + if idx < 0: # PA_INVALID_INDEX == -1 + raise RuntimeError("Could not get client index of PulseAudio context.") + name = None + @_ffi.callback("pa_client_info_cb_t") + def callback(context, client_info, eol, userdata): + nonlocal name + if not eol: + name = _ffi.string(client_info.name).decode("utf-8") + self._pa_context_get_client_info(self.context, idx, callback, _ffi.NULL) + assert name is not None + return name + + @name.setter + def name(self, name): + rv = None + @_ffi.callback("pa_context_success_cb_t") + def callback(context, success, userdata): + nonlocal rv + rv = success + self._pa_context_set_name(self.context, name.encode(), callback, _ffi.NULL) + assert rv is not None + if rv == 0: + raise RuntimeError("Setting PulseAudio context name failed") + + @property + def source_list(self): + """Return a list of dicts of information about available sources.""" + info = [] + @_ffi.callback("pa_source_info_cb_t") + def callback(context, source_info, eol, userdata): + if not eol: + info.append(dict(name=_ffi.string(source_info.description).decode("utf-8"), + id=_ffi.string(source_info.name).decode("utf-8"))) + self._pa_context_get_source_info_list(self.context, callback, _ffi.NULL) + return info + + def source_info(self, id): + """Return a dictionary of information about a specific source.""" + info = [] + @_ffi.callback("pa_source_info_cb_t") + def callback(context, source_info, eol, userdata): + if not eol: + info_dict = dict(latency=source_info.latency, + configured_latency=source_info.configured_latency, + channels=source_info.sample_spec.channels, + name=_ffi.string(source_info.description).decode("utf-8")) + for prop in ["device.class", "device.api", "device.bus"]: + data = _pa.pa_proplist_gets(source_info.proplist, prop.encode()) + info_dict[prop] = _ffi.string(data).decode("utf-8") if data else None + info.append(info_dict) + + self._pa_context_get_source_info_by_name(self.context, id.encode(), callback, _ffi.NULL) + return info[0] + + @property + def sink_list(self): + """Return a list of dicts of information about available sinks.""" + info = [] + @_ffi.callback("pa_sink_info_cb_t") + def callback(context, sink_info, eol, userdata): + if not eol: + info.append((dict(name=_ffi.string(sink_info.description).decode("utf-8"), + id=_ffi.string(sink_info.name).decode("utf-8")))) + self._pa_context_get_sink_info_list(self.context, callback, _ffi.NULL) + return info + + def sink_info(self, id): + """Return a dictionary of information about a 
specific sink.""" + info = [] + @_ffi.callback("pa_sink_info_cb_t") + def callback(context, sink_info, eol, userdata): + if not eol: + info_dict = dict(latency=sink_info.latency, + configured_latency=sink_info.configured_latency, + channels=sink_info.sample_spec.channels, + name=_ffi.string(sink_info.description).decode("utf-8")) + for prop in ["device.class", "device.api", "device.bus"]: + data = _pa.pa_proplist_gets(sink_info.proplist, prop.encode()) + info_dict[prop] = _ffi.string(data).decode("utf-8") if data else None + info.append(info_dict) + self._pa_context_get_sink_info_by_name(self.context, id.encode(), callback, _ffi.NULL) + return info[0] + + @property + def server_info(self): + """Return a dictionary of information about the server.""" + info = {} + @_ffi.callback("pa_server_info_cb_t") + def callback(context, server_info, userdata): + info["server version"] = _ffi.string(server_info.server_version).decode("utf-8") + info["server name"] = _ffi.string(server_info.server_name).decode("utf-8") + info["default sink id"] = _ffi.string(server_info.default_sink_name).decode("utf-8") + info["default source id"] = _ffi.string(server_info.default_source_name).decode("utf-8") + self._pa_context_get_server_info(self.context, callback, _ffi.NULL) + return info + + def _lock_mainloop(self): + """Context manager for locking the mainloop. + + Hold this lock before calling any pulseaudio function while + the mainloop is running. + + """ + + class Lock(): + def __enter__(self_): + _pa.pa_threaded_mainloop_lock(self.mainloop) + def __exit__(self_, exc_type, exc_value, traceback): + _pa.pa_threaded_mainloop_unlock(self.mainloop) + return Lock() + + # create thread-safe versions of all used pulseaudio functions: + _pa_context_get_source_info_list = _lock_and_block(_pa.pa_context_get_source_info_list) + _pa_context_get_source_info_by_name = _lock_and_block(_pa.pa_context_get_source_info_by_name) + _pa_context_get_sink_info_list = _lock_and_block(_pa.pa_context_get_sink_info_list) + _pa_context_get_sink_info_by_name = _lock_and_block(_pa.pa_context_get_sink_info_by_name) + _pa_context_get_client_info = _lock_and_block(_pa.pa_context_get_client_info) + _pa_context_get_server_info = _lock_and_block(_pa.pa_context_get_server_info) + _pa_context_get_index = _lock(_pa.pa_context_get_index) + _pa_context_get_state = _lock(_pa.pa_context_get_state) + _pa_context_set_name = _lock_and_block(_pa.pa_context_set_name) + _pa_context_drain = _lock(_pa.pa_context_drain) + _pa_context_disconnect = _lock(_pa.pa_context_disconnect) + _pa_context_unref = _lock(_pa.pa_context_unref) + _pa_context_errno = _lock(_pa.pa_context_errno) + _pa_operation_get_state = _lock(_pa.pa_operation_get_state) + _pa_operation_unref = _lock(_pa.pa_operation_unref) + _pa_stream_get_state = _lock(_pa.pa_stream_get_state) + _pa_sample_spec_valid = _lock(_pa.pa_sample_spec_valid) + _pa_stream_new = _lock(_pa.pa_stream_new) + _pa_stream_get_channel_map = _lock(_pa.pa_stream_get_channel_map) + _pa_stream_drain = _lock_and_block(_pa.pa_stream_drain) + _pa_stream_disconnect = _lock(_pa.pa_stream_disconnect) + _pa_stream_unref = _lock(_pa.pa_stream_unref) + _pa_stream_connect_record = _lock(_pa.pa_stream_connect_record) + _pa_stream_readable_size = _lock(_pa.pa_stream_readable_size) + _pa_stream_peek = _lock(_pa.pa_stream_peek) + _pa_stream_drop = _lock(_pa.pa_stream_drop) + _pa_stream_connect_playback = _lock(_pa.pa_stream_connect_playback) + _pa_stream_update_timing_info = _lock_and_block(_pa.pa_stream_update_timing_info) + 
_pa_stream_get_latency = _lock(_pa.pa_stream_get_latency) + _pa_stream_writable_size = _lock(_pa.pa_stream_writable_size) + _pa_stream_write = _lock(_pa.pa_stream_write) + _pa_stream_set_read_callback = _pa.pa_stream_set_read_callback + +_pulse = _PulseAudio() +atexit.register(_pulse._shutdown) + +def all_speakers(): + """A list of all connected speakers. + + Returns + ------- + speakers : list(_Speaker) + + """ + return [_Speaker(id=s["id"]) for s in _pulse.sink_list] + + +def default_speaker(): + """The default speaker of the system. + + Returns + ------- + speaker : _Speaker + + """ + name = _pulse.server_info["default sink id"] + return get_speaker(name) + + +def get_speaker(id): + """Get a specific speaker by a variety of means. + + Parameters + ---------- + id : int or str + can be a backend id string (Windows, Linux) or a device id int (MacOS), a substring of the + speaker name, or a fuzzy-matched pattern for the speaker name. + + Returns + ------- + speaker : _Speaker + + """ + speakers = _pulse.sink_list + return _Speaker(id=_match_soundcard(id, speakers)["id"]) + + +def all_microphones(include_loopback=False, exclude_monitors=True): + """A list of all connected microphones. + + By default, this does not include loopbacks (virtual microphones + that record the output of a speaker). + + Parameters + ---------- + include_loopback : bool + allow recording of speaker outputs + exclude_monitors : bool + deprecated version of ``include_loopback`` + + Returns + ------- + microphones : list(_Microphone) + + """ + + if not exclude_monitors: + warnings.warn("The exclude_monitors flag is being replaced by the include_loopback flag", DeprecationWarning) + include_loopback = not exclude_monitors + + mics = [_Microphone(id=m["id"]) for m in _pulse.source_list] + if not include_loopback: + return [m for m in mics if m._get_info()["device.class"] != "monitor"] + else: + return mics + + +def default_microphone(): + """The default microphone of the system. + + Returns + ------- + microphone : _Microphone + """ + name = _pulse.server_info["default source id"] + return get_microphone(name, include_loopback=True) + + +def get_microphone(id, include_loopback=False, exclude_monitors=True): + """Get a specific microphone by a variety of means. + + By default, this does not include loopbacks (virtual microphones + that record the output of a speaker). + + Parameters + ---------- + id : int or str + can be a backend id string (Windows, Linux) or a device id int (MacOS), a substring of the + speaker name, or a fuzzy-matched pattern for the speaker name. + include_loopback : bool + allow recording of speaker outputs + exclude_monitors : bool + deprecated version of ``include_loopback`` + + Returns + ------- + microphone : _Microphone + """ + + if not exclude_monitors: + warnings.warn("The exclude_monitors flag is being replaced by the include_loopback flag", DeprecationWarning) + include_loopback = not exclude_monitors + + microphones = _pulse.source_list + return _Microphone(id=_match_soundcard(id, microphones, include_loopback)["id"]) + + +def _match_soundcard(id, soundcards, include_loopback=False): + """Find id in a list of soundcards. + + id can be a pulseaudio id, a substring of the microphone name, or + a fuzzy-matched pattern for the microphone name. 
+ """ + if not include_loopback: + soundcards_by_id = {soundcard["id"]: soundcard for soundcard in soundcards + if not "monitor" in soundcard["id"]} + soundcards_by_name = {soundcard["name"]: soundcard for soundcard in soundcards + if not "monitor" in soundcard["id"]} + else: + soundcards_by_id = {soundcard["id"]: soundcard for soundcard in soundcards} + soundcards_by_name = {soundcard["name"]: soundcard for soundcard in soundcards} + if id in soundcards_by_id: + return soundcards_by_id[id] + # try substring match: + for name, soundcard in soundcards_by_name.items(): + if id in name: + return soundcard + # try fuzzy match: + pattern = ".*".join(id) + for name, soundcard in soundcards_by_name.items(): + if re.match(pattern, name): + return soundcard + raise IndexError("no soundcard with id {}".format(id)) + + +def get_name(): + """Get application name. + + .. note:: + Currently only works on Linux. + + Returns + ------- + name : str + """ + return _pulse.name + + +def set_name(name): + """Set application name. + + .. note:: + Currently only works on Linux. + + Parameters + ---------- + name : str + The application using the soundcard + will be identified by the OS using this name. + """ + _pulse.name = name + + +class _SoundCard: + def __init__(self, *, id): + self._id = id + + @property + def channels(self): + """int or list(int): Either the number of channels, or a list of + channel indices. Index -1 is the mono mixture of all channels, + and subsequent numbers are channel numbers (left, right, + center, ...) + + """ + return self._get_info()["channels"] + + @property + def id(self): + """object: A backend-dependent unique ID.""" + return self._id + + @property + def name(self): + """str: The human-readable name of the soundcard.""" + return self._get_info()["name"] + + def _get_info(self): + return _pulse.source_info(self._id) + + +class _Speaker(_SoundCard): + """A soundcard output. Can be used to play audio. + + Use the :func:`play` method to play one piece of audio, or use the + :func:`player` method to get a context manager for playing continuous + audio. + + Multiple calls to :func:`play` play immediately and concurrently, + while the :func:`player` schedules multiple pieces of audio one + after another. + + """ + + def __repr__(self): + return "".format(self.name, self.channels) + + def player(self, samplerate, channels=None, blocksize=None): + """Create Player for playing audio. + + Parameters + ---------- + samplerate : int + The desired sampling rate in Hz + channels : {int, list(int)}, optional + Play on these channels. For example, ``[0, 3]`` will play + stereo data on the physical channels one and four. + Defaults to use all available channels. + On Linux, channel ``-1`` is the mono mix of all channels. + On macOS, channel ``-1`` is silence. + blocksize : int + Will play this many samples at a time. Choose a lower + block size for lower latency and more CPU usage. + exclusive_mode : bool, optional + Windows only: open sound card in exclusive mode, which + might be necessary for short block lengths or high + sample rates or optimal performance. Default is ``False``. + + Returns + ------- + player : _Player + """ + if channels is None: + channels = self.channels + return _Player(self._id, samplerate, channels, blocksize) + + def play(self, data, samplerate, channels=None, blocksize=None): + """Play some audio data. + + Parameters + ---------- + data : numpy array + The audio data to play. Must be a *frames x channels* Numpy array. 
+ samplerate : int + The desired sampling rate in Hz + channels : {int, list(int)}, optional + Play on these channels. For example, ``[0, 3]`` will play + stereo data on the physical channels one and four. + Defaults to use all available channels. + On Linux, channel ``-1`` is the mono mix of all channels. + On macOS, channel ``-1`` is silence. + blocksize : int + Will play this many samples at a time. Choose a lower + block size for lower latency and more CPU usage. + """ + if channels is None: + channels = self.channels + with _Player(self._id, samplerate, channels, blocksize) as s: + s.play(data) + + def _get_info(self): + return _pulse.sink_info(self._id) + + +class _Microphone(_SoundCard): + """A soundcard input. Can be used to record audio. + + Use the :func:`record` method to record one piece of audio, or use + the :func:`recorder` method to get a context manager for recording + continuous audio. + + Multiple calls to :func:`record` record immediately and + concurrently, while the :func:`recorder` schedules multiple pieces + of audio to be recorded one after another. + + """ + + def __repr__(self): + if self.isloopback: + return "".format(self.name, self.channels) + else: + return "".format(self.name, self.channels) + + @property + def isloopback(self): + """bool : Whether this microphone is recording a speaker.""" + return self._get_info()["device.class"] == "monitor" + + def recorder(self, samplerate, channels=None, blocksize=None): + """Create Recorder for recording audio. + + Parameters + ---------- + samplerate : int + The desired sampling rate in Hz + channels : {int, list(int)}, optional + Record on these channels. For example, ``[0, 3]`` will record + stereo data from the physical channels one and four. + Defaults to use all available channels. + On Linux, channel ``-1`` is the mono mix of all channels. + On macOS, channel ``-1`` is silence. + blocksize : int + Will record this many samples at a time. Choose a lower + block size for lower latency and more CPU usage. + exclusive_mode : bool, optional + Windows only: open sound card in exclusive mode, which + might be necessary for short block lengths or high + sample rates or optimal performance. Default is ``False``. + + Returns + ------- + recorder : _Recorder + """ + if channels is None: + channels = self.channels + return _Recorder(self._id, samplerate, channels, blocksize) + + def record(self, numframes, samplerate, channels=None, blocksize=None): + """Record some audio data. + + Parameters + ---------- + numframes: int + The number of frames to record. + samplerate : int + The desired sampling rate in Hz + channels : {int, list(int)}, optional + Record on these channels. For example, ``[0, 3]`` will record + stereo data from the physical channels one and four. + Defaults to use all available channels. + On Linux, channel ``-1`` is the mono mix of all channels. + On macOS, channel ``-1`` is silence. + blocksize : int + Will record this many samples at a time. Choose a lower + block size for lower latency and more CPU usage. + + Returns + ------- + data : numpy array + The recorded audio data. Will be a *frames x channels* Numpy array. + """ + if channels is None: + channels = self.channels + with _Recorder(self._id, samplerate, channels, blocksize) as r: + return r.record(numframes) + + +class _Stream: + """A context manager for an active audio stream. + + This class is meant to be subclassed. 
Children must implement the + `_connect_stream` method which takes a `pa_buffer_attr*` struct, + and connects an appropriate stream. + + This context manager can only be entered once, and can not be used + after it is closed. + + """ + + def __init__(self, id, samplerate, channels, blocksize=None, name="outputstream"): + self._id = id + self._samplerate = samplerate + self._name = name + self._blocksize = blocksize + self.channels = channels + + def __enter__(self): + samplespec = _ffi.new("pa_sample_spec*") + samplespec.format = _pa.PA_SAMPLE_FLOAT32LE + samplespec.rate = self._samplerate + if isinstance(self.channels, collections.abc.Iterable): + samplespec.channels = len(self.channels) + elif isinstance(self.channels, int): + samplespec.channels = self.channels + else: + raise TypeError("channels must be iterable or integer") + if not _pulse._pa_sample_spec_valid(samplespec): + raise RuntimeError("invalid sample spec") + + # pam and channelmap refer to the same object, but need different + # names to avoid garbage collection trouble on the Python/C boundary + pam = _ffi.new("pa_channel_map*") + channelmap = _pa.pa_channel_map_init_extend(pam, samplespec.channels, _pa.PA_CHANNEL_MAP_DEFAULT) + if isinstance(self.channels, collections.abc.Iterable): + for idx, ch in enumerate(self.channels): + if isinstance(ch, int): + channelmap.map[idx] = ch + 1 + else: + channel_name_to_index = channel_name_map() + channelmap.map[idx] = channel_name_to_index[ch] + 1 + + if not _pa.pa_channel_map_valid(channelmap): + raise RuntimeError("invalid channel map") + + self.stream = _pulse._pa_stream_new(_pulse.context, self._name.encode(), samplespec, channelmap) + if not self.stream: + errno = _pulse._pa_context_errno(_pulse.context) + raise RuntimeError("stream creation failed with error ", errno) + bufattr = _ffi.new("pa_buffer_attr*") + bufattr.maxlength = 2**32-1 # max buffer length + numchannels = self.channels if isinstance(self.channels, int) else len(self.channels) + bufattr.fragsize = self._blocksize*numchannels*4 if self._blocksize else 2**32-1 # recording block sys.getsizeof() + bufattr.minreq = 2**32-1 # start requesting more data at this bytes + bufattr.prebuf = 2**32-1 # start playback after this bytes are available + bufattr.tlength = self._blocksize*numchannels*4 if self._blocksize else 2**32-1 # buffer length in bytes on server + self._connect_stream(bufattr) + while _pulse._pa_stream_get_state(self.stream) not in [_pa.PA_STREAM_READY, _pa.PA_STREAM_FAILED]: + time.sleep(0.01) + if _pulse._pa_stream_get_state(self.stream) == _pa.PA_STREAM_FAILED: + raise RuntimeError("Stream creation failed. 
Stream is in status {}" + .format(_pulse._pa_stream_get_state(self.stream))) + channel_map = _pulse._pa_stream_get_channel_map(self.stream) + self.channels = int(channel_map.channels) + return self + + def __exit__(self, exc_type, exc_value, traceback): + if isinstance(self, _Player): # only playback streams need to drain + _pulse._pa_stream_drain(self.stream, _ffi.NULL, _ffi.NULL) + _pulse._pa_stream_disconnect(self.stream) + while _pulse._pa_stream_get_state(self.stream) not in (_pa.PA_STREAM_TERMINATED, _pa.PA_STREAM_FAILED): + time.sleep(0.01) + _pulse._pa_stream_unref(self.stream) + + @property + def latency(self): + """float : Latency of the stream in seconds (only available on Linux)""" + _pulse._pa_stream_update_timing_info(self.stream, _ffi.NULL, _ffi.NULL) + microseconds = _ffi.new("pa_usec_t*") + _pulse._pa_stream_get_latency(self.stream, microseconds, _ffi.NULL) + return microseconds[0] / 1000000 # 1_000_000 (3.5 compat) + + +class _Player(_Stream): + """A context manager for an active output stream. + + Audio playback is available as soon as the context manager is + entered. Audio data can be played using the :func:`play` method. + Successive calls to :func:`play` will queue up the audio one piece + after another. If no audio is queued up, this will play silence. + + This context manager can only be entered once, and can not be used + after it is closed. + + """ + + def _connect_stream(self, bufattr): + _pulse._pa_stream_connect_playback(self.stream, self._id.encode(), bufattr, _pa.PA_STREAM_ADJUST_LATENCY, + _ffi.NULL, _ffi.NULL) + + def play(self, data): + """Play some audio data. + + Internally, all data is handled as ``float32`` and with the + appropriate number of channels. For maximum performance, + provide data as a *frames × channels* float32 numpy array. + + If single-channel or one-dimensional data is given, this data + will be played on all available channels. + + This function will return *before* all data has been played, + so that additional data can be provided for gapless playback. + The amount of buffering can be controlled through the + blocksize of the player object. + + If data is provided faster than it is played, later pieces + will be queued up and played one after another. + + Parameters + ---------- + data : numpy array + The audio data to play. Must be a *frames x channels* Numpy array. + + """ + + data = numpy.array(data, dtype="float32", order="C") + if data.ndim == 1: + data = data[:, None] # force 2d + if data.ndim != 2: + raise TypeError("data must be 1d or 2d, not {}d".format(data.ndim)) + if data.shape[1] == 1 and self.channels != 1: + data = numpy.tile(data, [1, self.channels]) + if data.shape[1] != self.channels: + raise TypeError("second dimension of data must be equal to the number of channels, not {}".format(data.shape[1])) + while data.nbytes > 0: + nwrite = _pulse._pa_stream_writable_size(self.stream) // (4 * self.channels) # 4 bytes per sample + + if nwrite == 0: + time.sleep(0.001) + continue + bytes = data[:nwrite].ravel().tobytes() + _pulse._pa_stream_write(self.stream, bytes, len(bytes), _ffi.NULL, 0, _pa.PA_SEEK_RELATIVE) + data = data[nwrite:] + +class _Recorder(_Stream): + """A context manager for an active input stream. + + Audio recording is available as soon as the context manager is + entered. Recorded audio data can be read using the :func:`record` + method. If no audio data is available, :func:`record` will block until + the requested amount of audio data has been recorded. 
+ + This context manager can only be entered once, and can not be used + after it is closed. + + """ + + def __init__(self, *args, **kwargs): + super(_Recorder, self).__init__(*args, **kwargs) + self._pending_chunk = numpy.zeros((0, ), dtype="float32") + self._record_event = threading.Event() + + def _connect_stream(self, bufattr): + _pulse._pa_stream_connect_record(self.stream, self._id.encode(), bufattr, _pa.PA_STREAM_ADJUST_LATENCY) + @_ffi.callback("pa_stream_request_cb_t") + def read_callback(stream, nbytes, userdata): + self._record_event.set() + self._callback = read_callback + _pulse._pa_stream_set_read_callback(self.stream, read_callback, _ffi.NULL) + + def _record_chunk(self): + '''Record one chunk of audio data, as returned by pulseaudio + + The data will be returned as a 1D numpy array, which will be used by + the `record` method. This function is the interface of the `_Recorder` + object with pulseaudio + ''' + data_ptr = _ffi.new("void**") + nbytes_ptr = _ffi.new("size_t*") + readable_bytes = _pulse._pa_stream_readable_size(self.stream) + while not readable_bytes: + if not self._record_event.wait(timeout=1): + if _pulse._pa_stream_get_state(self.stream) == _pa.PA_STREAM_FAILED: + raise RuntimeError("Recording failed, stream is in status FAILED") + self._record_event.clear() + readable_bytes = _pulse._pa_stream_readable_size(self.stream) + data_ptr[0] = _ffi.NULL + nbytes_ptr[0] = 0 + _pulse._pa_stream_peek(self.stream, data_ptr, nbytes_ptr) + if data_ptr[0] != _ffi.NULL: + buffer = _ffi.buffer(data_ptr[0], nbytes_ptr[0]) + chunk = numpy.frombuffer(buffer, dtype="float32").copy() + if data_ptr[0] == _ffi.NULL and nbytes_ptr[0] != 0: + chunk = numpy.zeros(nbytes_ptr[0]//4, dtype="float32") + if nbytes_ptr[0] > 0: + _pulse._pa_stream_drop(self.stream) + return chunk + + def record(self, numframes=None): + """Record a block of audio data. + + The data will be returned as a *frames × channels* float32 + numpy array. This function will wait until ``numframes`` + frames have been recorded. If numframes is given, it will + return exactly ``numframes`` frames, and buffer the rest for + later. + + If ``numframes`` is None, it will return whatever the audio + backend has available right now. Use this if latency must be + kept to a minimum, but be aware that block sizes can change at + the whims of the audio backend. + + If using :func:`record` with ``numframes=None`` after using + :func:`record` with a required ``numframes``, the last + buffered frame will be returned along with the new recorded + block. (If you want to empty the last buffered frame instead, + use :func:`flush`) + + Parameters + ---------- + numframes : int, optional + The number of frames to record. + + Returns + ------- + data : numpy array + The recorded audio data. Will be a *frames x channels* Numpy array. 
+ + """ + if numframes is None: + return numpy.reshape(numpy.concatenate([self.flush().ravel(), self._record_chunk()]), + [-1, self.channels]) + else: + captured_data = [self._pending_chunk] + captured_frames = self._pending_chunk.shape[0] / self.channels + if captured_frames >= numframes: + keep, self._pending_chunk = numpy.split(self._pending_chunk, + [int(numframes * self.channels)]) + return numpy.reshape(keep, [-1, self.channels]) + else: + while captured_frames < numframes: + chunk = self._record_chunk() + captured_data.append(chunk) + captured_frames += len(chunk)/self.channels + to_split = int(len(chunk) - (captured_frames - numframes) * self.channels) + captured_data[-1], self._pending_chunk = numpy.split(captured_data[-1], [to_split]) + return numpy.reshape(numpy.concatenate(captured_data), [-1, self.channels]) + + def flush(self): + """Return the last pending chunk. + + After using the :func:`record` method, this will return the + last incomplete chunk and delete it. + + Returns + ------- + data : numpy array + The recorded audio data. Will be a *frames x channels* Numpy array. + + """ + last_chunk = numpy.reshape(self._pending_chunk, [-1, self.channels]) + self._pending_chunk = numpy.zeros((0, ), dtype="float32") + return last_chunk diff --git a/LXST/Platforms/windows/__init__.py b/LXST/Platforms/windows/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/LXST/Platforms/windows/mediafoundation.h b/LXST/Platforms/windows/mediafoundation.h new file mode 100644 index 0000000..7f18cdd --- /dev/null +++ b/LXST/Platforms/windows/mediafoundation.h @@ -0,0 +1,256 @@ +// see um/winnt.h: +typedef long HRESULT; +typedef wchar_t *LPWSTR; +typedef long long LONGLONG; + +// originally, struct=interface, see um/combaseapi.h + +// see shared/rpcndr.h: +typedef unsigned char byte; + +// see shared/guiddef.h: +typedef struct { + unsigned long Data1; + unsigned short Data2; + unsigned short Data3; + byte Data4[ 8 ]; +} GUID; +typedef GUID IID; +typedef IID *LPIID; + +// see um/mmdeviceapi.h: +typedef struct IMMDeviceEnumerator IMMDeviceEnumerator; +typedef struct IMMDeviceCollection IMMDeviceCollection; +typedef struct IMMDevice IMMDevice; +typedef struct IMMNotificationClient IMMNotificationClient; + +// see um/mfidl.h: +typedef struct IMFMediaSink IMFMediaSink; + +// see um/mfobjects.h: +typedef struct IMFAttributes IMFAttributes; + +// see um/Unknwn.h: +typedef struct IUnknown IUnknown; +typedef IUnknown *LPUNKNOWN; + +// see shared/wtypes.h: +typedef unsigned long DWORD; +typedef const char *LPCSTR; + +// see shared/WTypesbase.h: +typedef void *LPVOID; +typedef LPCSTR LPCOLESTR; +typedef IID *REFIID; + +// see um/combaseapi.h: +HRESULT CoCreateInstance(const GUID* rclsid, LPUNKNOWN pUnkOuter, DWORD dwClsContext, const GUID* riid, LPVOID * ppv); +HRESULT IIDFromString(LPCOLESTR lpsz, LPIID lpiid); +HRESULT CoInitializeEx(LPVOID pvReserved, DWORD dwCoInit); +void CoTaskMemFree(LPVOID pv); +LPVOID CoTaskMemAlloc(size_t cb); +void CoUninitialize(void); + +// see um/mmdeviceapi.h: +typedef enum EDataFlow {eRender, eCapture, eAll, EDataFlow_enum_count} EDataFlow; + +typedef enum ERole {eConsole, eMultimedia, eCommunications, ERole_enum_count} ERole; + +typedef struct IMMDeviceEnumeratorVtbl +{ + HRESULT ( __stdcall *QueryInterface )(IMMDeviceEnumerator * This, const GUID *riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IMMDeviceEnumerator * This); + ULONG ( __stdcall *Release )(IMMDeviceEnumerator * This); + HRESULT ( __stdcall *EnumAudioEndpoints )(IMMDeviceEnumerator * 
This, EDataFlow dataFlow, DWORD dwStateMask, IMMDeviceCollection **ppDevices); + HRESULT ( __stdcall *GetDefaultAudioEndpoint )(IMMDeviceEnumerator * This, EDataFlow dataFlow, ERole role, IMMDevice **ppEndpoint); + HRESULT ( __stdcall *GetDevice )(IMMDeviceEnumerator * This, LPCWSTR pwstrId, IMMDevice **ppDevice); +/* I hope I won't need these + HRESULT ( __stdcall *RegisterEndpointNotificationCallback )(IMMDeviceEnumerator * This, IMMNotificationClient *pClient); + HRESULT ( __stdcall *UnregisterEndpointNotificationCallback )(IMMDeviceEnumerator * This, IMMNotificationClient *pClient); +*/ +} IMMDeviceEnumeratorVtbl; + +struct IMMDeviceEnumerator +{ + const struct IMMDeviceEnumeratorVtbl *lpVtbl; +}; + +typedef struct IMMDeviceCollectionVtbl +{ + HRESULT ( __stdcall *QueryInterface )(IMMDeviceCollection * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IMMDeviceCollection * This); + ULONG ( __stdcall *Release )(IMMDeviceCollection * This); + HRESULT ( __stdcall *GetCount )(IMMDeviceCollection * This, UINT *pcDevices); + HRESULT ( __stdcall *Item )(IMMDeviceCollection * This, UINT nDevice, IMMDevice **ppDevice); +} IMMDeviceCollectionVtbl; + +struct IMMDeviceCollection +{ + const struct IMMDeviceCollectionVtbl *lpVtbl; +}; + +// um/propsys.h +typedef struct IPropertyStore IPropertyStore; +// um/combaseapi.h +typedef struct tag_inner_PROPVARIANT PROPVARIANT; +// shared/wtypes.h +typedef unsigned short VARTYPE; +// um/propidl.h +struct tag_inner_PROPVARIANT { + VARTYPE vt; + WORD wReserved1; + WORD wReserved2; + WORD wReserved3; + void * data; +}; +void PropVariantInit(PROPVARIANT *p); +HRESULT PropVariantClear(PROPVARIANT *p); + +typedef struct IMMDeviceVtbl { + HRESULT ( __stdcall *QueryInterface )(IMMDevice * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IMMDevice * This); + ULONG ( __stdcall *Release )(IMMDevice * This); + HRESULT ( __stdcall *Activate )(IMMDevice * This, REFIID iid, DWORD dwClsCtx, PROPVARIANT *pActivationParams, void **ppInterface); + HRESULT ( __stdcall *OpenPropertyStore )(IMMDevice * This, DWORD stgmAccess, IPropertyStore **ppProperties); + HRESULT ( __stdcall *GetId )(IMMDevice * This, LPWSTR *ppstrId); + HRESULT ( __stdcall *GetState )(IMMDevice * This, DWORD *pdwState); +} IMMDeviceVtbl; + +struct IMMDevice { + const struct IMMDeviceVtbl *lpVtbl; +}; + +// um/propkeydef.h +typedef struct { + GUID fmtid; + DWORD pid; +} PROPERTYKEY; + +const PROPERTYKEY PKEY_Device_FriendlyName = {{0xa45c254e, 0xdf1c, 0x4efd, {0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0}}, 14}; +const PROPERTYKEY PKEY_AudioEngine_DeviceFormat = {{0xf19f064d, 0x82c, 0x4e27, {0xbc, 0x73, 0x68, 0x82, 0xa1, 0xbb, 0x8e, 0x4c}}, 0}; + +typedef struct IPropertyStoreVtbl { + HRESULT ( __stdcall *QueryInterface )(IPropertyStore * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IPropertyStore * This); + ULONG ( __stdcall *Release )(IPropertyStore * This); + HRESULT ( __stdcall *GetCount )(IPropertyStore * This, DWORD *cProps); + HRESULT ( __stdcall *GetAt )(IPropertyStore * This, DWORD iProp, PROPERTYKEY *pkey); + HRESULT ( __stdcall *GetValue )(IPropertyStore * This, const PROPERTYKEY *key, PROPVARIANT *pv); + HRESULT ( __stdcall *SetValue )(IPropertyStore * This, const PROPERTYKEY *key, const PROPVARIANT *propvar); + HRESULT ( __stdcall *Commit )(IPropertyStore * This); +} IPropertyStoreVtbl; + +struct IPropertyStore { + const struct IPropertyStoreVtbl *lpVtbl; +}; + +// shared/WTypesbase.h +typedef struct tagBLOB { + ULONG cbSize; + BYTE 
*pBlobData; +} BLOB; + + +typedef struct tag_inner_BLOB_PROPVARIANT BLOB_PROPVARIANT; +struct tag_inner_BLOB_PROPVARIANT { + VARTYPE vt; + WORD wReserved1; + WORD wReserved2; + WORD wReserved3; + BLOB blob; +}; + +typedef struct WAVEFORMATEX { + WORD wFormatTag; /* format type */ + WORD nChannels; /* number of channels (i.e. mono, stereo...) */ + DWORD nSamplesPerSec; /* sample rate */ + DWORD nAvgBytesPerSec; /* for buffer estimation */ + WORD nBlockAlign; /* block size of data */ + WORD wBitsPerSample; /* Number of bits per sample of mono data */ + WORD cbSize; /* The count in bytes of the size of + extra information (after cbSize) */ +} WAVEFORMATEX; + +typedef struct { + WAVEFORMATEX Format; + union { + WORD wValidBitsPerSample; /* bits of precision */ + WORD wSamplesPerBlock; /* valid if wBitsPerSample==0 */ + WORD wReserved; /* If neither applies, set to zero. */ + } Samples; + DWORD dwChannelMask; /* which channels are */ + /* present in stream */ + GUID SubFormat; +} WAVEFORMATEXTENSIBLE, *PWAVEFORMATEXTENSIBLE; + +// um/AudioSessionTypes.h +typedef enum _AUDCLNT_SHAREMODE +{ + AUDCLNT_SHAREMODE_SHARED, + AUDCLNT_SHAREMODE_EXCLUSIVE +} AUDCLNT_SHAREMODE; + +// um/dsound.h +typedef const GUID *LPCGUID; + +// um/Audioclient.h +typedef LONGLONG REFERENCE_TIME; + +typedef struct IAudioClient IAudioClient; + +typedef struct IAudioClientVtbl { + HRESULT ( __stdcall *QueryInterface )(IAudioClient * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IAudioClient * This); + ULONG ( __stdcall *Release )(IAudioClient * This); + HRESULT ( __stdcall *Initialize )(IAudioClient * This, AUDCLNT_SHAREMODE ShareMode, DWORD StreamFlags, REFERENCE_TIME hnsBufferDuration, REFERENCE_TIME hnsPeriodicity, const WAVEFORMATEXTENSIBLE *pFormat, LPCGUID AudioSessionGuid); + HRESULT ( __stdcall *GetBufferSize )(IAudioClient * This, UINT32 *pNumBufferFrames); + HRESULT ( __stdcall *GetStreamLatency )(IAudioClient * This, REFERENCE_TIME *phnsLatency); + HRESULT ( __stdcall *GetCurrentPadding )(IAudioClient * This, UINT32 *pNumPaddingFrames); + HRESULT ( __stdcall *IsFormatSupported )(IAudioClient * This, AUDCLNT_SHAREMODE ShareMode, const WAVEFORMATEXTENSIBLE *pFormat, WAVEFORMATEXTENSIBLE **ppClosestMatch); + HRESULT ( __stdcall *GetMixFormat )(IAudioClient * This, WAVEFORMATEXTENSIBLE **ppDeviceFormat); + HRESULT ( __stdcall *GetDevicePeriod )(IAudioClient * This, REFERENCE_TIME *phnsDefaultDevicePeriod, REFERENCE_TIME *phnsMinimumDevicePeriod); + HRESULT ( __stdcall *Start )(IAudioClient * This); + HRESULT ( __stdcall *Stop )(IAudioClient * This); + HRESULT ( __stdcall *Reset )(IAudioClient * This); + HRESULT ( __stdcall *SetEventHandle )(IAudioClient * This, HANDLE eventHandle); + HRESULT ( __stdcall *GetService )(IAudioClient * This, REFIID riid, void **ppv); +} IAudioClientVtbl; + +struct IAudioClient { + const struct IAudioClientVtbl *lpVtbl; +}; + +typedef struct IAudioRenderClient IAudioRenderClient; + +typedef struct IAudioRenderClientVtbl { + HRESULT ( __stdcall *QueryInterface )(IAudioRenderClient * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IAudioRenderClient * This); + ULONG ( __stdcall *Release )(IAudioRenderClient * This); + HRESULT ( __stdcall *GetBuffer )(IAudioRenderClient * This, UINT32 NumFramesRequested, BYTE **ppData); + HRESULT ( __stdcall *ReleaseBuffer )(IAudioRenderClient * This, UINT32 NumFramesWritten, DWORD dwFlags); +} IAudioRenderClientVtbl; + +struct IAudioRenderClient { + const struct IAudioRenderClientVtbl *lpVtbl; +}; + +typedef enum 
_AUDCLNT_BUFFERFLAGS { + AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY = 0x1, + AUDCLNT_BUFFERFLAGS_SILENT = 0x2, + AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR = 0x4 +}; + +typedef struct IAudioCaptureClient IAudioCaptureClient; + +typedef struct IAudioCaptureClientVtbl { + HRESULT ( __stdcall *QueryInterface )(IAudioCaptureClient * This, REFIID riid, void **ppvObject); + ULONG ( __stdcall *AddRef )(IAudioCaptureClient * This); + ULONG ( __stdcall *Release )(IAudioCaptureClient * This); + HRESULT ( __stdcall *GetBuffer )(IAudioCaptureClient * This, BYTE **ppData, UINT32 *pNumFramesToRead, DWORD *pdwFlags, UINT64 *pu64DevicePosition, UINT64 *pu64QPCPosition); + HRESULT ( __stdcall *ReleaseBuffer )(IAudioCaptureClient * This, UINT32 NumFramesRead); + HRESULT ( __stdcall *GetNextPacketSize )(IAudioCaptureClient * This, UINT32 *pNumFramesInNextPacket); +} IAudioCaptureClientVtbl; + +struct IAudioCaptureClient { + const struct IAudioCaptureClientVtbl *lpVtbl; +}; diff --git a/LXST/Platforms/windows/soundcard.py b/LXST/Platforms/windows/soundcard.py new file mode 100644 index 0000000..7c7497a --- /dev/null +++ b/LXST/Platforms/windows/soundcard.py @@ -0,0 +1,641 @@ +# Adapted from Bastian Bechtold's soundcard library, originally released +# under the BSD 3-Clause License +# +# https://github.com/bastibe/SoundCard +# +# Copyright (c) 2016 Bastian Bechtold +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Modifications and improvements Copyright 2025 Mark Qvist, and released +# under the same BSD 3-Clause License. 
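Both new desktop backends (LXST/Platforms/linux/soundcard.py above and LXST/Platforms/windows/soundcard.py below) expose the same soundcard-style module API: all_speakers()/default_speaker()/get_speaker() plus the microphone equivalents, with play()/player() on speaker objects and record()/recorder() on microphone objects. A minimal usage sketch follows; the import path, sample rate and block size are assumptions chosen for illustration rather than values taken from this patch:

    # Illustrative sketch only; assumes the backend is importable as
    # LXST.Platforms.windows.soundcard (the linux backend mirrors this API).
    import numpy
    from LXST.Platforms.windows import soundcard

    spk = soundcard.default_speaker()
    mic = soundcard.default_microphone()

    # One-shot: record one second at 48 kHz, then play it back
    samples = mic.record(numframes=48000, samplerate=48000)
    spk.play(samples, samplerate=48000)

    # Streaming: recorder()/player() context managers exchange
    # frames x channels float32 blocks
    with mic.recorder(samplerate=48000, blocksize=1024) as rec:
        block = rec.record(numframes=1024)

    with spk.player(samplerate=48000, blocksize=1024) as p:
        p.play(numpy.zeros((1024, 1), dtype="float32"))  # mono data is tiled to all channels
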
+ +import os +import cffi +import re +import time +import struct +import collections +import platform +import warnings +import threading +import numpy +import RNS + +_ffi = cffi.FFI() +_package_dir, _ = os.path.split(__file__) +with open(os.path.join(_package_dir, 'mediafoundation.h'), 'rt') as f: _ffi.cdef(f.read()) +try: _ole32 = _ffi.dlopen('ole32') +except OSError: + try: _ole32 = _ffi.dlopen('ole32.dll') + except: raise SystemError("LXST Could not load OLE32 DLL for WASAPI integration") + +def tid(): return threading.get_native_id() +com_thread_ids = [] +class _COMLibrary: + def __init__(self): + self._lock = threading.Lock() + self.init_com() + + def init_com(self): + with self._lock: + if tid() in com_thread_ids: return + else: + com_thread_ids.append(tid()) + COINIT_MULTITHREADED = 0x0 + RNS.log(f"COM init from thread {tid()}", RNS.LOG_EXTREME) + if platform.win32_ver()[0] == "8": raise OSError("Unsupported Windows version") + else: hr = _ole32.CoInitializeEx(_ffi.NULL, COINIT_MULTITHREADED) + + try: + self.check_error(hr) + self.com_loaded = True + except RuntimeError as e: + # Error 0x80010106 - COM already initialized + RPC_E_CHANGED_MODE = 0x80010106 + if hr + 2 ** 32 == RPC_E_CHANGED_MODE: self.com_loaded = False + else: raise e + + def release_com(self): + with self._lock: + if tid() in com_thread_ids: + com_thread_ids.remove(tid()) + RNS.log(f"COM release from thread {tid()}", RNS.LOG_EXTREME) + if _ole32 != None: _ole32.CoUninitialize() + else: RNS.log(f"OLE32 instance was None at de-init for thread {tid()}", RNS.LOG_DEBUG) + + def __del__(self): self.release_com() + + @staticmethod + def check_error(hresult): + S_OK = 0 + E_NOINTERFACE = 0x80004002 + E_POINTER = 0x80004003 + E_OUTOFMEMORY = 0x8007000e + E_INVALIDARG = 0x80070057 + CO_E_NOTINITIALIZED = 0x800401f0 + AUDCLNT_E_UNSUPPORTED_FORMAT = 0x88890008 + if hresult == S_OK: return + elif hresult+2**32 == E_NOINTERFACE: raise RuntimeError("The specified class does not implement the requested interface, or the controlling IUnknown does not expose the requested interface.") + elif hresult+2**32 == E_POINTER: raise RuntimeError("An argument is NULL") + elif hresult+2**32 == E_INVALIDARG: raise RuntimeError("Invalid argument") + elif hresult+2**32 == E_OUTOFMEMORY: raise RuntimeError("Out of memory") + elif hresult+2**32 == AUDCLNT_E_UNSUPPORTED_FORMAT: raise RuntimeError("Unsupported format") + elif hresult+2**32 == CO_E_NOTINITIALIZED: raise RuntimeError(f"Windows COM context not initialized in {tid()}") + else: raise RuntimeError("Error {}".format(hex(hresult+2**32))) + + @staticmethod + def release(ppObject): + if ppObject[0] != _ffi.NULL: + ppObject[0][0].lpVtbl.Release(ppObject[0]) + ppObject[0] = _ffi.NULL + +_com = _COMLibrary() + +def all_speakers(): + with _DeviceEnumerator() as enum: + return [_Speaker(dev) for dev in enum.all_devices('speaker')] + +def default_speaker(): + with _DeviceEnumerator() as enum: + return _Speaker(enum.default_device('speaker')) + +def get_speaker(id): + return _match_device(id, all_speakers()) + +def all_microphones(include_loopback=False): + with _DeviceEnumerator() as enum: + if include_loopback: + return [_Microphone(dev, isloopback=True) for dev in enum.all_devices('speaker')] + [_Microphone(dev) for dev in enum.all_devices('microphone')] + else: + return [_Microphone(dev) for dev in enum.all_devices('microphone')] + +def default_microphone(): + with _DeviceEnumerator() as enum: + return _Microphone(enum.default_device('microphone')) + +def get_microphone(id, 
include_loopback=False): + return _match_device(id, all_microphones(include_loopback)) + +def _match_device(id, devices): + devices_by_id = {device.id: device for device in devices} + devices_by_name = {device.name: device for device in devices} + if id in devices_by_id: return devices_by_id[id] + + # Try substring match: + for name, device in devices_by_name.items(): + if id in name: return device + + # Try fuzzy match: + pattern = '.*'.join(id) + for name, device in devices_by_name.items(): + if re.match(pattern, name): return device + + raise IndexError('No device with id {}'.format(id)) + +def _str2wstr(string): + return _ffi.new('int16_t[]', [ord(s) for s in string]+[0]) + +def _guidof(uuid_str): + IID = _ffi.new('LPIID') + uuid = _str2wstr(uuid_str) + hr = _ole32.IIDFromString(_ffi.cast("char*", uuid), IID) + _com.check_error(hr) + return IID + +def get_name(): raise NotImplementedError() +def set_name(name): raise NotImplementedError() + +class _DeviceEnumerator: + # See shared/WTypesbase.h and um/combaseapi.h: + def __init__(self): + _com.init_com() + self._ptr = _ffi.new('IMMDeviceEnumerator **') + IID_MMDeviceEnumerator = _guidof("{BCDE0395-E52F-467C-8E3D-C4579291692E}") + IID_IMMDeviceEnumerator = _guidof("{A95664D2-9614-4F35-A746-DE8DB63617E6}") + CLSCTX_ALL = 23 + hr = _ole32.CoCreateInstance(IID_MMDeviceEnumerator, _ffi.NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, _ffi.cast("void **", self._ptr)) + _com.check_error(hr) + + def __enter__(self): + _com.init_com() + return self + + def __exit__(self, exc_type, exc_value, traceback): _com.release(self._ptr) + def __del__(self): _com.release(self._ptr) + + def _device_id(self, device_ptr): + ppId = _ffi.new('LPWSTR *') + hr = device_ptr[0][0].lpVtbl.GetId(device_ptr[0], ppId) + _com.check_error(hr) + return _ffi.string(ppId[0]) + + def all_devices(self, kind): + if kind == 'speaker': data_flow = 0 # render + elif kind == 'microphone': data_flow = 1 # capture + else: raise TypeError('Invalid kind: {}'.format(kind)) + + DEVICE_STATE_ACTIVE = 0x1 + ppDevices = _ffi.new('IMMDeviceCollection **') + hr = self._ptr[0][0].lpVtbl.EnumAudioEndpoints(self._ptr[0], data_flow, DEVICE_STATE_ACTIVE, ppDevices); + _com.check_error(hr) + + for ppDevice in _DeviceCollection(ppDevices): + device = _Device(self._device_id(ppDevice)) + _com.release(ppDevice) + yield device + + def default_device(self, kind): + if kind == 'speaker': data_flow = 0 # render + elif kind == 'microphone': data_flow = 1 # capture + else: raise TypeError('Invalid kind: {}'.format(kind)) + + ppDevice = _ffi.new('IMMDevice **') + eConsole = 0 + hr = self._ptr[0][0].lpVtbl.GetDefaultAudioEndpoint(self._ptr[0], data_flow, eConsole, ppDevice); + _com.check_error(hr) + device = _Device(self._device_id(ppDevice)) + _com.release(ppDevice) + return device + + def device_ptr(self, devid): + ppDevice = _ffi.new('IMMDevice **') + devid = _str2wstr(devid) + hr = self._ptr[0][0].lpVtbl.GetDevice(self._ptr[0], _ffi.cast('wchar_t *', devid), ppDevice); + _com.check_error(hr) + return ppDevice + +class _DeviceCollection: + def __init__(self, ptr): + _com.init_com() + self._ptr = ptr + + def __del__(self): _com.release(self._ptr) + + def __len__(self): + pCount = _ffi.new('UINT *') + hr = self._ptr[0][0].lpVtbl.GetCount(self._ptr[0], pCount) + _com.check_error(hr) + return pCount[0] + + def __getitem__(self, idx): + if idx >= len(self): + raise StopIteration() + ppDevice = _ffi.new('IMMDevice **') + hr = self._ptr[0][0].lpVtbl.Item(self._ptr[0], idx, ppDevice) + _com.check_error(hr) + return 
ppDevice + +class _PropVariant: + def __init__(self): + _com.init_com() + self.ptr = _ole32.CoTaskMemAlloc(_ffi.sizeof('PROPVARIANT')) + self.ptr = _ffi.cast("PROPVARIANT *", self.ptr) + + def __del__(self): + hr = _ole32.PropVariantClear(self.ptr) + _com.check_error(hr) + +class _Device: + def __init__(self, id): + _com.init_com() + self._id = id + + def _device_ptr(self): + with _DeviceEnumerator() as enum: + return enum.device_ptr(self._id) + + @property + def id(self): return self._id + + @property + def name(self): + # um/coml2api.h: + ppPropertyStore = _ffi.new('IPropertyStore **') + ptr = self._device_ptr() + hr = ptr[0][0].lpVtbl.OpenPropertyStore(ptr[0], 0, ppPropertyStore) + _com.release(ptr) + _com.check_error(hr) + propvariant = _PropVariant() + # um/functiondiscoverykeys_devpkey.h and https://msdn.microsoft.com/en-us/library/windows/desktop/dd370812(v=vs.85).aspx + PKEY_Device_FriendlyName = _ffi.new("PROPERTYKEY *", + [[0xa45c254e, 0xdf1c, 0x4efd, [0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0]], + 14]) + hr = ppPropertyStore[0][0].lpVtbl.GetValue(ppPropertyStore[0], PKEY_Device_FriendlyName, propvariant.ptr) + _com.check_error(hr) + if propvariant.ptr[0].vt != 31: + raise RuntimeError('Property was expected to be a string, but is not a string') + data = _ffi.cast("short*", propvariant.ptr[0].data) + for idx in range(256): + if data[idx] == 0: break + devicename = struct.pack('h' * idx, *data[0:idx]).decode('utf-16') + _com.release(ppPropertyStore) + return devicename + + @property + def channels(self): + # um/coml2api.h: + ppPropertyStore = _ffi.new('IPropertyStore **') + ptr = self._device_ptr() + hr = ptr[0][0].lpVtbl.OpenPropertyStore(ptr[0], 0, ppPropertyStore) + _com.release(ptr) + _com.check_error(hr) + propvariant = _PropVariant() + # um/functiondiscoverykeys_devpkey.h and https://msdn.microsoft.com/en-us/library/windows/desktop/dd370812(v=vs.85).aspx + PKEY_AudioEngine_DeviceFormat = _ffi.new("PROPERTYKEY *", + [[0xf19f064d, 0x82c, 0x4e27, [0xbc, 0x73, 0x68, 0x82, 0xa1, 0xbb, 0x8e, 0x4c]], + 0]) + hr = ppPropertyStore[0][0].lpVtbl.GetValue(ppPropertyStore[0], PKEY_AudioEngine_DeviceFormat, propvariant.ptr) + _com.release(ppPropertyStore) + _com.check_error(hr) + if propvariant.ptr[0].vt != 65: + raise RuntimeError('Property was expected to be a blob, but is not a blob') + pPropVariantBlob = _ffi.cast("BLOB_PROPVARIANT *", propvariant.ptr) + assert pPropVariantBlob[0].blob.cbSize == 40 + waveformat = _ffi.cast("WAVEFORMATEX *", pPropVariantBlob[0].blob.pBlobData) + channels = waveformat[0].nChannels + return channels + + def _audio_client(self): + CLSCTX_ALL = 23 + ppAudioClient = _ffi.new("IAudioClient **") + IID_IAudioClient = _guidof("{1CB9AD4C-DBFA-4C32-B178-C2F568A703B2}") + ptr = self._device_ptr() + hr = ptr[0][0].lpVtbl.Activate(ptr[0], IID_IAudioClient, CLSCTX_ALL, _ffi.NULL, _ffi.cast("void**", ppAudioClient)) + _com.release(ptr) + _com.check_error(hr) + return ppAudioClient + +class _Speaker(_Device): + def __init__(self, device): self._id = device._id + + def __repr__(self): return '<Speaker {} ({} channels)>'.format(self.name,self.channels) + + def player(self, samplerate, channels=None, blocksize=None, exclusive_mode=False): + if channels is None: channels = self.channels + return _Player(self._audio_client(), samplerate, channels, blocksize, False, exclusive_mode) + + def play(self, data, samplerate, channels=None, blocksize=None): + with self.player(samplerate, channels, blocksize) as p: p.play(data) + + +class _Microphone(_Device): + def __init__(self, device,
isloopback=False): + self._id = device._id + self.isloopback = isloopback + + def __repr__(self): + if self.isloopback: return '<Loopback {} ({} channels)>'.format(self.name,self.channels) + else: return '<Microphone {} ({} channels)>'.format(self.name,self.channels) + + def recorder(self, samplerate, channels=None, blocksize=None, exclusive_mode=False): + if channels is None: channels = self.channels + return _Recorder(self._audio_client(), samplerate, channels, blocksize, self.isloopback, exclusive_mode) + + def record(self, numframes, samplerate, channels=None, blocksize=None): + with self.recorder(samplerate, channels, blocksize) as r: return r.record(numframes) + +class _AudioClient: + def __init__(self, ptr, samplerate, channels, blocksize, isloopback, exclusive_mode=False): + self._ptr = ptr + + if isinstance(channels, int): self.channelmap = list(range(channels)) + elif isinstance(channels, collections.abc.Iterable): self.channelmap = channels + else: raise TypeError('Channels must be iterable or integer') + + if list(range(len(set(self.channelmap)))) != sorted(list(set(self.channelmap))): + raise TypeError('Due to limitations of WASAPI, channel maps on Windows must be a combination of `range(0, x)`.') + + if blocksize is None: blocksize = self.deviceperiod[0]*samplerate + + ppMixFormat = _ffi.new('WAVEFORMATEXTENSIBLE**') # See: https://docs.microsoft.com/en-us/windows/win32/api/mmreg/ns-mmreg-waveformatextensible + hr = self._ptr[0][0].lpVtbl.GetMixFormat(self._ptr[0], ppMixFormat) + _com.check_error(hr) + + # It's a WAVEFORMATEXTENSIBLE with room for KSDATAFORMAT_SUBTYPE_IEEE_FLOAT: + # Note: Some devices may not return 0xFFFE format, but WASAPI should handle conversion + if ppMixFormat[0][0].Format.wFormatTag == 0xFFFE: + assert ppMixFormat[0][0].Format.cbSize == 22 + + # The data format is float32: + # These values were found empirically, and I don't know why they work.
+ # The program crashes if these values are different + assert ppMixFormat[0][0].SubFormat.Data1 == 0x100000 + assert ppMixFormat[0][0].SubFormat.Data2 == 0x0080 + assert ppMixFormat[0][0].SubFormat.Data3 == 0xaa00 + assert [int(x) for x in ppMixFormat[0][0].SubFormat.Data4[0:4]] == [0, 56, 155, 113] + # the last four bytes seem to vary randomly + else: + # Device doesn't return WAVEFORMATEXTENSIBLE, but WASAPI will handle conversion + # Just skip the assertions and let WASAPI convert + pass + + channels = len(set(self.channelmap)) + channelmask = 0 + for ch in self.channelmap: channelmask |= 1< 0: + towrite = self._render_available_frames() + if towrite == 0: + time.sleep(0.001) + continue + + bytes = data[:towrite].ravel().tobytes() + buffer = self._render_buffer(towrite) + _ffi.memmove(buffer[0], bytes, len(bytes)) + self._render_release(towrite) + data = data[towrite:] + +class _Recorder(_AudioClient): + # https://msdn.microsoft.com/en-us/library/windows/desktop/dd370800(v=vs.85).aspx + def _capture_client(self): + iid = _guidof("{C8ADBD64-E71E-48a0-A4DE-185C395CD317}") + ppCaptureClient = _ffi.new("IAudioCaptureClient**") + hr = self._ptr[0][0].lpVtbl.GetService(self._ptr[0], iid, _ffi.cast("void**", ppCaptureClient)) + _com.check_error(hr) + return ppCaptureClient + + def _capture_buffer(self): + data = _ffi.new("BYTE**") + toread = _ffi.new('UINT32*') + flags = _ffi.new('DWORD*') + hr = self._ppCaptureClient[0][0].lpVtbl.GetBuffer(self._ppCaptureClient[0], data, toread, flags, _ffi.NULL, _ffi.NULL) + _com.check_error(hr) + return data[0], toread[0], flags[0] + + def _capture_release(self, numframes): + hr = self._ppCaptureClient[0][0].lpVtbl.ReleaseBuffer(self._ppCaptureClient[0], numframes) + _com.check_error(hr) + + def _capture_available_frames(self): + pSize = _ffi.new("UINT32*") + hr = self._ppCaptureClient[0][0].lpVtbl.GetNextPacketSize(self._ppCaptureClient[0], pSize) + _com.check_error(hr) + return pSize[0] + + def __enter__(self): + _com.init_com() + self._ppCaptureClient = self._capture_client() + hr = self._ptr[0][0].lpVtbl.Start(self._ptr[0]) + _com.check_error(hr) + self._pending_chunk = numpy.zeros([0], dtype='float32') + self._is_first_frame = True + return self + + def __exit__(self, exc_type, exc_value, traceback): + hr = self._ptr[0][0].lpVtbl.Stop(self._ptr[0]) + _com.check_error(hr) + _com.release(self._ppCaptureClient) + _com.release(self._ptr) + _com.release_com() + + def _record_chunk(self): + while self._capture_available_frames() == 0: + # Some sound cards indicate silence by not making any + # frames available. If that is the case, we need to + # estimate the number of zeros to return, by measuring the + # silent time: + if self._idle_start_time is None: self._idle_start_time = time.perf_counter_ns() + + default_block_length, minimum_block_length = self.deviceperiod + time.sleep(minimum_block_length/4) + elapsed_time_ns = time.perf_counter_ns() - self._idle_start_time + + # Waiting times shorter than a block length or so are + # normal, and not indicative of a silent sound card. 
If + # the waiting times get longer however, we must assume + # that there is no audio data forthcoming, and return + # zeros instead: + if elapsed_time_ns / 1_000_000_000 > default_block_length * 4: + num_frames = int(self.samplerate * elapsed_time_ns / 1_000_000_000) + num_channels = len(set(self.channelmap)) + self._idle_start_time += elapsed_time_ns + return numpy.zeros([num_frames * num_channels], dtype='float32') + + self._idle_start_time = None + data_ptr, nframes, flags = self._capture_buffer() + if data_ptr != _ffi.NULL: + # Convert the raw CFFI buffer into a standard bytes object to ensure compatibility + # with modern NumPy versions (fromstring binary mode was removed). Using frombuffer + # on bytes plus .copy() guarantees a writable float32 array for downstream processing. + buf = bytes(_ffi.buffer(data_ptr, nframes * 4 * len(set(self.channelmap)))) + chunk = numpy.frombuffer(buf, dtype=numpy.float32).copy() + else: raise RuntimeError('Could not create capture buffer') + + # See https://learn.microsoft.com/en-us/windows/win32/api/audioclient/ne-audioclient-_audclnt_bufferflags + if flags & _ole32.AUDCLNT_BUFFERFLAGS_SILENT: chunk[:] = 0 + if self._is_first_frame: + # On first run, clear data discontinuity error, as it will always be set: + flags &= ~_ole32.AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY + self._is_first_frame = False + if flags & _ole32.AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY: pass + + # Ignore _ole32.AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR, since we don't use time stamps. + if nframes > 0: + self._capture_release(nframes) + return chunk + else: + return numpy.zeros([0], dtype='float32') + + def record(self, numframes=None): + if numframes is None: + recorded_data = [self._pending_chunk, self._record_chunk()] + self._pending_chunk = numpy.zeros([0], dtype='float32') + + else: + recorded_frames = len(self._pending_chunk) + recorded_data = [self._pending_chunk] + self._pending_chunk = numpy.zeros([0], dtype='float32') + required_frames = numframes*len(set(self.channelmap)) + + while recorded_frames < required_frames: + chunk = self._record_chunk() + if len(chunk) == 0: + # No data forthcoming: return zeros + chunk = numpy.zeros(required_frames-recorded_frames, dtype='float32') + recorded_data.append(chunk) + recorded_frames += len(chunk) + + if recorded_frames > required_frames: + to_split = -int(recorded_frames-required_frames) + recorded_data[-1], self._pending_chunk = numpy.split(recorded_data[-1], [to_split]) + + data = numpy.reshape(numpy.concatenate(recorded_data), [-1, len(set(self.channelmap))]) + return data[:, self.channelmap] + + def flush(self): + last_chunk = numpy.reshape(self._pending_chunk, [-1, len(set(self.channelmap))]) + self._pending_chunk = numpy.zeros([0], dtype='float32') + return last_chunk diff --git a/LXST/Primitives/Telephony.py b/LXST/Primitives/Telephony.py index 4de9bd6..7a48c51 100644 --- a/LXST/Primitives/Telephony.py +++ b/LXST/Primitives/Telephony.py @@ -124,10 +124,16 @@ class Telephone(SignallingReceiver): ALLOW_NONE = 0xFE @staticmethod - def available_outputs(): return LXST.Sources.Backend().soundcard.all_speakers() + def available_outputs(): return LXST.Sinks.Backend().all_speakers() @staticmethod - def available_inputs(): return LXST.Sinks.Backend().soundcard.all_microphones() + def available_inputs(): return LXST.Sources.Backend().all_microphones() + + @staticmethod + def default_output(): return LXST.Sinks.Backend().default_speaker() + + @staticmethod + def default_input(): return LXST.Sources.Backend().default_microphone() def 
__init__(self, identity, ring_time=RING_TIME, wait_time=WAIT_TIME, auto_answer=None, allowed=ALLOW_ALL, receive_gain=0.0, transmit_gain=0.0): super().__init__() @@ -163,6 +169,7 @@ class Telephone(SignallingReceiver): self.dial_tone = None self.dial_tone_frequency = self.DIAL_TONE_FREQUENCY self.dial_tone_ease_ms = self.DIAL_TONE_EASE_MS + self.busy_tone_seconds = 4.25 self.transmit_codec = None self.receive_codec = None self.receive_mixer = None @@ -245,6 +252,9 @@ class Telephone(SignallingReceiver): self.ringtone_gain = gain RNS.log(f"{self} ringtone set to {self.ringtone_path}", RNS.LOG_DEBUG) + def set_busy_tone_time(self, seconds=4.25): + self.busy_tone_seconds = seconds + def enable_agc(self, enable=True): if enable == True: self.use_agc = True else: self.use_agc = False @@ -521,15 +531,16 @@ class Telephone(SignallingReceiver): threading.Thread(target=job, daemon=True).start() def __play_busy_tone(self): - if self.audio_output == None or self.receive_mixer == None or self.dial_tone == None: self.__reset_dialling_pipelines() - with self.pipeline_lock: - window = 0.5; started = time.time() - while time.time()-started < 4.25: - elapsed = (time.time()-started)%window - if elapsed > 0.25: self.__enable_dial_tone() - else: self.__mute_dial_tone() - time.sleep(0.005) - time.sleep(0.5) + if self.busy_tone_seconds > 0: + if self.audio_output == None or self.receive_mixer == None or self.dial_tone == None: self.__reset_dialling_pipelines() + with self.pipeline_lock: + window = 0.5; started = time.time() + while time.time()-started < self.busy_tone_seconds: + elapsed = (time.time()-started)%window + if elapsed > 0.25: self.__enable_dial_tone() + else: self.__mute_dial_tone() + time.sleep(0.005) + time.sleep(0.5) def __activate_dial_tone(self): def job(): diff --git a/LXST/Sinks.py b/LXST/Sinks.py index be7ef09..a110a66 100644 --- a/LXST/Sinks.py +++ b/LXST/Sinks.py @@ -8,7 +8,7 @@ class LinuxBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard + from .Platforms.linux import soundcard self.samplerate = samplerate self.soundcard = soundcard if preferred_device: @@ -17,7 +17,10 @@ class LinuxBackend(): else: self.device = soundcard.default_speaker() RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_speakers(self): return self.soundcard.all_speakers() + def default_speaker(self): return self.soundcard.default_speaker() + + def flush(self): self.device.flush() def get_player(self, samples_per_frame=None, low_latency=None): return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame) @@ -37,7 +40,10 @@ class AndroidBackend(): else: self.device = soundcard.default_speaker() RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_speakers(self): return self.soundcard.all_speakers() + def default_speaker(self): return self.soundcard.default_speaker() + + def flush(self): self.device.flush() def get_player(self, samples_per_frame=None, low_latency=None): return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame, low_latency=low_latency) @@ -48,7 +54,7 @@ class DarwinBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard + from .Platforms.darwin import soundcard self.samplerate = samplerate self.soundcard = soundcard if preferred_device: @@ -57,7 +63,10 @@ class DarwinBackend(): else: self.device = 
soundcard.default_speaker() RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_speakers(self): return self.soundcard.all_speakers() + def default_speaker(self): return self.soundcard.default_speaker() + + def flush(self): self.device.flush() def get_player(self, samples_per_frame=None, low_latency=None): return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame) @@ -68,25 +77,24 @@ class WindowsBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard - from pythoncom import CoInitializeEx, CoUninitialize - self.com_init = CoInitializeEx - self.com_release = CoUninitialize - self.samplerate = samplerate - self.soundcard = soundcard + from .Platforms.windows import soundcard + self.samplerate = samplerate + self.soundcard = soundcard if preferred_device: try: self.device = self.soundcard.get_speaker(preferred_device) except: self.device = soundcard.default_speaker() else: self.device = soundcard.default_speaker() RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_speakers(self): return self.soundcard.all_speakers() + def default_speaker(self): return self.soundcard.default_speaker() + + def flush(self): self.device.flush() def get_player(self, samples_per_frame=None, low_latency=None): - self.com_init(0) return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame) - def release_player(self): self.com_release() + def release_player(self): pass def get_backend(): if RNS.vendor.platformutils.is_linux(): return LinuxBackend diff --git a/LXST/Sources.py b/LXST/Sources.py index d10aaea..4da546b 100644 --- a/LXST/Sources.py +++ b/LXST/Sources.py @@ -13,7 +13,7 @@ class LinuxBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard + from .Platforms.linux import soundcard self.samplerate = samplerate self.soundcard = soundcard if preferred_device: @@ -24,7 +24,10 @@ class LinuxBackend(): self.bitdepth = 32 RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_microphones(self): return self.soundcard.all_microphones() + def default_microphone(self): return self.soundcard.default_microphone() + + def flush(self): self.device.flush() def get_recorder(self, samples_per_frame): return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame) @@ -46,7 +49,10 @@ class AndroidBackend(): self.bitdepth = 32 RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_microphones(self): return self.soundcard.all_microphones() + def default_microphone(self): return self.soundcard.default_microphone() + + def flush(self): self.device.flush() def get_recorder(self, samples_per_frame): return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame) @@ -57,7 +63,7 @@ class DarwinBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard + from .Platforms.darwin import soundcard self.samplerate = samplerate self.soundcard = soundcard if preferred_device: @@ -68,7 +74,10 @@ class DarwinBackend(): self.bitdepth = 32 RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_microphones(self): return self.soundcard.all_microphones() + def default_microphone(self): return 
self.soundcard.default_microphone() + + def flush(self): self.device.flush() def get_recorder(self, samples_per_frame): return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame) @@ -79,12 +88,9 @@ class WindowsBackend(): SAMPLERATE = 48000 def __init__(self, preferred_device=None, samplerate=SAMPLERATE): - import soundcard - from pythoncom import CoInitializeEx, CoUninitialize - self.com_init = CoInitializeEx - self.com_release = CoUninitialize - self.samplerate = samplerate - self.soundcard = soundcard + from .Platforms.windows import soundcard + self.samplerate = samplerate + self.soundcard = soundcard if preferred_device: try: self.device = self.soundcard.get_microphone(preferred_device) except: self.device = self.soundcard.default_microphone() @@ -93,13 +99,15 @@ class WindowsBackend(): self.bitdepth = 32 RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG) - def flush(self): self.recorder.flush() + def all_microphones(self): return self.soundcard.all_microphones() + def default_microphone(self): return self.soundcard.default_microphone() + + def flush(self): self.device.flush() def get_recorder(self, samples_per_frame): - self.com_init(0) return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame) - def release_recorder(self): self.com_release() + def release_recorder(self): pass def get_backend(): if RNS.vendor.platformutils.is_linux(): return LinuxBackend diff --git a/LXST/_version.py b/LXST/_version.py index df12433..f6b7e26 100644 --- a/LXST/_version.py +++ b/LXST/_version.py @@ -1 +1 @@ -__version__ = "0.4.2" +__version__ = "0.4.3" diff --git a/Makefile b/Makefile index 39f74ad..7eeb1c7 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,14 @@ build_wheel: python3 setup.py sdist bdist_wheel rm ./skip_extensions -@(rm ./LXST/*.so) - -@(rm ./LXST/*.pyd) + -@(rm ./LXST/*.dll) + +windll: + cl /LD LXST/Filters.c LXST/Filters.def /Fefilterlib.dll + mv ./filterlib.dll ./lib/dev/ + rm ./filterlib.exp + rm ./filterlib.lib + rm ./filterlib.obj native_libs: ./march_build.sh @@ -32,7 +39,6 @@ native_libs: persist_libs: -cp ./lib/dev/*.so ./lib/static/ -cp ./lib/dev/*.dll ./lib/static/ - -cp ./lib/dev/*.dylib ./lib/static/ release: remove_symlinks build_wheel create_symlinks diff --git a/README.md b/README.md index 382c65b..7ec7d17 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ LXST uses encryption provided by [Reticulum](https://reticulum.network), and thu This software is in a very early alpha state, and will change rapidly with ongoing development. Consider no APIs stable. Consider everything explosive. Not all features are implemented. Nothing is documented. For a fully functional LXST program, take a look at [Sideband](https://github.com/markqvist/Sideband) or the included `rnphone` program, which provides telephony service over Reticulum. Everything else will currently be a voyage of your own making. -While under early development, the project is kept under a `CC BY-NC-ND 4.0` license. +While under early development, and unless otherwise noted, the project is kept under a `CC BY-NC-ND 4.0` license. 
## Installation diff --git a/lib/static/filterlib.dll b/lib/static/filterlib.dll new file mode 100644 index 0000000000000000000000000000000000000000..a4ceefdf105382e0ad5ed12accd25ba992cdbbda GIT binary patch literal 104960 zcmeFadw5e-y7-^8NgJSL2c%dPG-A*ysFiw24QK+X>|m-@VMOJoRbaaJzP->lf59C$FS!0YzWVELzx|H5?}nRv3lg{czJ0qda`{!h@7!_YO=p#s4)o`m zZhLIv=gnPXAI<;!cl7kf4Br18t$1vuexChUQa{gp>|UPDU1vVJgXc>V&wlKF{j7M* z($CW$`z_CcNfnPZ>F3zo^DOucF>S`R08}BW0gtv|Uc0O;fW0Y@D!QgR@ zUF3Axxi_A`)2q`BPI*YWz~Ly;S-nrk9=-|^`LDn+hZnn2{_)j!JEwAm)A2*g$bc8gZxi5RE1H9k#-&ygSmc;p3_ZmL} zvWz`94pRC*pTjZdtOYk-AHUw=IQJ)H(Wc`e{{5YQef|rvY|gTaIUXwG!zutN;Ti7> zkX~D@XU(?@>M=4_-M5Y5o)&d`bLYpezu~r<9MVK9KQb0+x6gkezMQq-rrYkQcW4|r zz?H*8f$x8%+{8ZB{{JKxh@@hEUnCXrmz$}ney_=^$4q`$(dp7VG~=;c{%mW?<+mb! zv)bifHr1?m`OW$gziCbNSH{Smv)}N#?%5wUX1G@FH-D{FA$)-t1LJ-n=beZfI6?MK~T}s@9P}-TG{7o=9gx2eMS4`KL35w`~kSz z^OnQ0-bwbd`vO6subG|x;Pji6%ul)#|J#g=qs9I+!fMq^_8Vp-^Vfbj|5|)Eez;PWD3is* zUc{Ap5m#DG{zu3%o@nwv%9Hh6x$HFQu+pq&q^9*lzYx+Gb-V|XOZ@@oG7milWZUqxcdKk4*{=MOs-ruxfcgb4l{YF7KBwEyVm|Wy4Tz2C&RO?fU(E#(@^+mBsXrKk-i#0 zjH_GqVw_5^bErOEZ~5`M^my{d(%<-HmysHuM_M86Qu0PY8$U6A`k+nF#|qU1=o%qF zEY_*4Qz0{UPnlorH_iai{HIy+NflN}J+_6cZ+?xl-P``*a4fnc|JvklrRhcI^WNlt zQui`Tm!?jIE>vNh-Q-^_g$!LtlDf%?Ot}E-Gp+mmdvvBIC{ppO`Nr8~FPf=a=+cF+ zu{$U=frtHjzhAPCs@Ss``*j0V+HZ_)^6!!gbgp=&oSE;M%!8zp0mt$Y`4=W}hUFO3 z)~BGUq1Z-AwtEhWb#QqNg=GJqLXn%@ke=O;p50Jxc04Z^v-%kf+z{W%}=%h9BC zq^ER5myY!AfXFf-#gRTRpUa9|BH%0$^x5Dr$x1{by#{Urnuz~4nO$=c)Kgyc2Q%|? z{btIi`HL3nm-+h*78mFT@}wzVvVG#oH2zl&|*fPOn>?l~`0!zFfEP4u;9+fXB zd#w9|FSA**as5hK?8i+uZ-)1mL$gH!v|N5jUwQum8bU20>^H83jH&-V+u_(}T-%zX zk|H}a?}%3?KXAq?b~u_^jV9S3>J{{umR&1ZTcKy2$q7U5 zp}Nt$flr-oCockm^3iQmYy5KcCc?D3mMW@|A2VInvRfXd^*q#C2G!h}I9>gfZxr|O zeWLxnO1|H~_fGfYjZQ6&hQ?)EHmOlG7;4HUE>`nMnbt0o-4=&^b*VcvnDO$M)v*>D z=!jY`nO0%UDt(@f8qRLdy3eC2jiXFKRjX;u5Y`a)2TW_J-vk%F@T~SV#TtIvUIqWx zc)1#6REAm(Tkg*gots-%l-iX;RH>ZZp6yDx|IP>X9Gz;+_>vQjkWr90Uu~74-@*qq zgeE)PZ;~(nF_P(kTeTz9vQ;f0WjwP>?VzyLp82&6!^7mR0tnMuZ>FaEVyRGhs*d?F zJ>R-oPf#do?X`|DMa?@BMs?YAnqE;{)!O|o?WZSZlZTv(L#E}P&*)9i>`qU2W<1%l zC(%KyT}*go^Xo>_A1MsNtk=Mn`Yqd=Pb@f=N7E1betH17?N7<9GjTrcjiXxC4ym?) zZ)WoW7L?i~OWtz-n?C3>J~&?{TUJe0b2jcyW(yPJdwQ(u(_?%zHP_iSZD_sua@n#Y zujZl76ji-0&|B>?gG-o%P*+AYrSS?jhDt8*|LjYCfllE zN^16I%TDLbYBkT1beWw_*OuGwU+m;tO*z%~nr(Z0W)}Ixa)(aW3!EXA_a&ohypP?K5K9l$E)gBs6yRRqR z-t&ZRsQ2m5OnUt4Jv&iAgNVBfyFyK7l`?N9-(*^@(UlC-v`kONZ3esI!>U^Mn#P|> zD`+tBuW>d_dD59VD7cflgY3G+YA)+IbDX-4=Gm69ak@#@xYkUuDn8%gc+#mmlsXWX zGg&+kmmk^f<=eU(;G0?#Qfa7qBcNZ3RV}%h5Btc9{_@;n zI4qlm_0X+uWM8p-hbKRGCWh$sX?UuB>Ijs0i^QY{WKIt+GyUGDDH5-wG@)6&J_w# z#fRLOhpAUD&@ThP(9Ps$KIj=>=qhUa8YVnSO^3BRadYypbMY-I4asHZDw^ZOJCu>o&qeeTX@K;Jb7Knqr*LcU#AAZ zcU}7$hVD>b@}uV+a@)4L_F9KyZneAYt!y>{zb-XNK+N6pD15Q8TI@iH*{cUJ%(>O! z^2F5Gn03apR}3?g>&nf-?Z&g=A~Rjm5KEW5Xr_kwqrvU*a%)OSEZDYSpf%A`*cJ=6 zE`T4dtH``rwZnwq$;ud(yvy%cCaHm}!y=vqdRdmY+c;|3troKst9F2wK~1kGip~4V zZn;~}7URg{aW*-Qmfdkz(~d-m`Y|YiIPYhW`2owCO1n2UvyVF6bEP!@wQsgIZaH1J zb`Ot$eT`gLNAu|kxADjfh5HB*EXtOR-yw)@_n5&q7j)YzOV*Q3v-Aq3?qs`e=%~7m zZ>YPD36Q>7=C_GmA@27{OWvy1s*T348b7%`e$#D@`@VhqP0>NyEv2bNMg_q!z7xY-GUL!s-PVrt#~xg~KA2`>MN~j&QQe8A-WKl9^)iE+$b^wXw$9o&2JB>BOpy z;dHo|zV}6}oyOXC1}q#zjRI;6ryv|_Y3XWVz-jY{@vDgS;%YxHLABJ7Aw{g$GetGl z_OSH{#J*us-2^l_*A%iDWC8GPz=e?kTX zOA4J}Brl)=`fzSD+urt>gCMn2{XRa8Fp|=?i7e>z7mY76nm?hA<$jjhf8LYq@^rd? 
zLy}eINV$JSDtus_FMdO^%h&0)c$K}9a;HeGaX1RM0r&SvM^4!eR9^sQQr?2w6Bp6Cv2>J z>zIX?^|m+3w8}=v2N-6X@`kNV$vs7lZ)LGRnx&^Z9L`kf&zFEp_{%AbyKL`woQ{UW z&P9XF#LU5x_ih~*yWW(zp1gJ$6YE3)4e3+^@U5pjkWkGv%Rlb7Vu{a%OB^kX|&;*6ib z=MpoGSs+@tK5D&^-G;2QvYGG8hIyB>3Wju1IiD;sPlG(JG;Q;=~##?dZ(0>B&w87fww!BU0^8&f3ED;p{f+ z7=S-w+%pH@H4nfwxt%0bukn}s@+2L8?nc*3%2xw*S#rScM@d0K!Jir{Y zt;KU;i8nFO%(gadOzf*_Z9kzE0@A+Orp?*1VVgiU&>Ul0n?XhIcGy@&1E{j4BA zU$%K;{N|Nn%}su2HdgC}b5-)ra#y0(3j5CVxqon6@=$?@;NeOc!v$u2or~!iVisP3 zOcAY6XE2KyU*^OE4lh`-!WE|FF=@5bw4A0D@@C6itP;j)YTRYt&f&$hcB_{(RjQkw z8DLf)ABWXkN!1;Red&oJU4fXY*35y;vHF3r!i~whT#lv3#_A`!%=#-mQOkw()>RNK zY>gJSwMVTsWIxgWY(8XF(|qH_S5BI=CqA_C;@Zo34s5(wh8G`|IYv#RNW*megD(=v)sRZ5!ljY>kc871lPq+=_BQ)9-^)p zG`$Oo`1LF*UQNlf;ThyVzx>T^Jcq2>riH}S#wD&VFw zslO>(wot!!=7eX_t@WzVI82xRNm?konldT(PiY}}C>uYO2{{uD((1~V)!N++?sPv$ zjub|&D^>#mGN*4asnV8JdaYCk+ia4PsMYDNA%hCk(5Ty-o0qEBX}^}%YFMG^lArPK z#kG$Zg0pOgM#7M+dm7)f?I;8bcIXZV@<3_TGy0VN6>LI{_H087gT98J?v++FbCjPne~@=&BC_e64!Ef zaEWJm5zy@tKHX3I&5zF%X8?zZ##4=qmQQax|k}5w|D)k5GXqmQ;tl$e<(@SDj zH^M@#^+~ODC}zE_{tcC`wWc{NRht@~xiMNl*_+)IMQ8J9Y^2Ap3f^{sYx$-8dREj# zEj$o{YSu%xtYib$kNVgJ-l`q3LXN%Ui<^Ci4VJ-7k6Ilf4KdSkjIC^d{yb1TA$Qdha%R(5;cUO)>Q4VR1MaaruQ|*7kNY{-d65fJ}4Z#?FSy?fnQQ7Y;CMT`+PlW?Fd^hCO2iRHZ-pW_(%L0 zYocpXwaZ$5GHTbP>WJl9aqQ&acx&m9$-$+(42%TZBUX0=<3zSC9DHfPdKu}waJmML z75C5Qv!GxO?HV;*vDA%zAFFq4jZw1=pJ#>GXML&3C8*o6)M@CMlf9AD)hKrBD|>O2 z4zrG34b0xXX5r56Ygj;P5WJGTADU*SZ}Y!pHh#EA58nOG4N$Bq3}iJ%%$kOQ#{E5v zAchwV?$$z{;WfR+5U1e{`HUe^FRzuxkg$*U5bu?|*YO_WeGcz+hIeHHEt55etof$Z zu*!a&ZwyJv7cYlt!L^2GK2o!K8k5~hbFlhF6*4KBZ(XeGLwJO3 z3YjR>$Kg|Yo_L*+RWFwE%M|w$S-2cfuzDf zyH$r>hrKp4pw{FXP)|z2x;hZEwu%XHpx*Ijp*g3j70Y!%a9t?^xyTVdqq> z-^LATp*6>IYU|ArwL%!3Vaw%A9{MsdC}tHztrwL@jGX0*NzClT6mNkJbwKv+h%OSho*9qC^^xYXlX?n?1%(@Z@Jn*pL50310y zBB@M?)n9{IW{T(1^oZkR0@=q`3`3TWk3k-(FWDI_+$g4%Vxeg}7JS5qq3J2vXxg#@f38|)-Q2YWot0<4pQ&KjH2mb#hF5( z^vyerCX{YS?cFZ>d#CO5g!-Pvc{}?ngxqO(%X#TByufE7=dRP+H z;Ar|=9`)l-W!)BauOu&8KdT&!4^kLPAY&zkf`P4co;WvJy{y7$`o2_@9#VGcVbX*E zs4kagtZkY*zNDgbnyHpLaRj&n@f&(t{^e0E$0vtdiu|qvyH416aCf%NIxLIY>dgB% z>Vz;u1eU^(dgyGsTH{Byn@v5a%PQ1e`?J{?+qHgCIfEFW-j*$q9#I*_ek6Pw zPKix^t9pJY!>#P8HtDEp|DCJWTcME(tL%Ni_)w^p+D2M@IO-p#=x^dT@rd7K8Xl8g zi^Cjo^R?>U-@@P!OPlx^SjI1-t*hk+1>XDp+xQ-{4w=^D+Amzow!)dQf%y@S#~u;b zC?#T?SwFMHy3=EwqxWme{r*FeS=cVUU^HNiTUR1lM6Fk)Mk_hO7EO@=LLfGPB?BG+ zbEJFC1k`X%I3M;uLRqG4P7iCG1^I^uUe#wfutNnqESL3cxnI66&kiAu|I;9Si6rrJ zQEqB21*>U5u7D(pkhLUgVYArI7FJ^+e-Y~t^0;yTRW3|!m$;1d6cV=L*Fh8kPCmg& zaz(kzXzn0oP3}>0jr;zG3^Tc+l2ZRgQk~8GnaL$SOfUZ_s`N?nZY}Yt;W}|SlzCo- zateWK&KgZ`QpWBoaia8)DDTtyVk}Y8a?=`02`EmrZJ%x)&)>yry`-(_0ri|t$Z9e> zLjj<0C^cAH0x(=5A2UYrVosx#?y>yH;sFPjcwo_8W-8HYLtr%jt{^|ohe`E;#JJCr zPs>Xj#=XyhajV&u1*Sme5nQ7i0;rClu&tOPNMIlQUb)l$zGQuAmy*l9d? 
z4t3|63`E&P_Js#U(@J({-CZ0nY&j(KLP_7Nd~pUONTTuCSg zE$+1<_jpr?vDRclWu}*?Ny0djtuEHDtVIN%+=nyxV3m}!PH$}9xI84`Li5e~a(8*E}j&0TPG>BA)P8p{GF| zgl-6u!`bzb;7!W7=K*$Z%(|(;DjH39CrvmRY0J3h6G2P;EFYA6pc(@SVEl=Z2QDFY zs_7N^X1yxgemg6SCDgo*Ed8;0u%OJsX}P-74R6CFu{Btm?l)7L;(fPdK2P`%;|if=yoruM-sG81Ge zF1n!MuhJ4*Pqw*@7`+Z>_ye>HGN}J@1X_cEX{(Kw#nML0$1>SV0*=HDh?*V|HIE?* z5K&XeNiWs8Zf5(R5%0IE0UyZ#Yadl3iYS&Y+foC$k@p$wv(4&PBjxv8X%z#I8bczM zVw<<=Ee_Ofvd5a&B;c zv>f{GS) z#!tv+4OFA-ED9QTC9MMm zWDQpW8*ETkw8nECR3zITd{{F~2wC zGwZKG4AsVgeXJb`a(IUZ)KvByyDe6@$!Nl*CfnAfCAFZiq`ahN%c6hg`+=f{_&^aj zjpkqL;O={xS$=ml@n0H+x!rSGZAntd{>iMs{INVf>&yq4PfOQA*o?F}Cg{y@`i!nv+s}prYaF_En_O(0M_=2XQaUlL+ zQaKc}_NkXYK;OP21N0apgukslo$a_rsAYKdsU(w2LXO1wIg{{N)yF2huxuk?n2jsu zi1L~#j8qrL%dEmv+9r(ReZjy3ll)Gj89%w>z+~54qv=6fP2TN-r6a~Wl7G>=1Nw#Z zA|-tBS+tP2AZA_SgYpXV7WqL-Cs$J>I0@-Q)P;yRjaYfFA~UPau$}JX$OWPkBi7}C zF!n*?{!^t~@KS1wAZ1#YS7ct&BDSXJW1wi=|J2}=I1)h(v#^8($C~xG`j~G+jrasq z6mCeIs8P@}D&w9%P>qqnB4mB!#FrbuMd5-OjVpXI^hHl{CJHuYU;!~I!?haiFPJ+kb zEA8L;we4Re?dMyCcck4{ifv8rDVOplM~Y|?j@dmR(Yr6^AmFSt7!Qu6jLHJ3}E_p@^;ZuxYo+d))Z5 zQ+@JJfyQn&mX7)&R=C`{!etG}maTn4wkb#ylvl*88|5^ECQbxd=QLA6Kn@!*Xx2QD zkgjAbmB=d|uOb znrz#(w9d|&)V4ZnedK8WK_A5r&v}jYmhCf-S%+2PLm9Npyhxd^L|N&VfJ5t$`t`ev zHB^~I5NSFrpauVJJg}F9?bIFJ~Ykt;UZGU{GPrdHGsj?g(Ql3>rbGhX52rkKB_*$c;J3s zgy_WT=ar}_aD7qyVuh+l=R2Rgh&8^PeYpH;vwnjpj>In($TFK3txq`AFsWs&s5A@r zY7u8|H}8df&4G_prKXBj#5~MFCmbBkZjB~4h{(C(`AlK>X=iprICz$Ee_U_J8UFd< zmi2j&MMn)_@=${@_4L@IdgML9ISx+ilj`c_*NE6<)_2;H?btLUbEtX^OnY)^MQ)|K zRGrS+V6W6k_IJHfVQ1(wf0#;;-1Ij!K^aMG!$nJfkKfToG)V7*ZQO+Ki!NiWlc?(K z9@0^(HD;mrZ6o|5im%>G-sutE_~dh-*{BJ7gz5GB@0zgU6; z5lkD#6Y_b`OwWVHZc!g&NXdDL%VHKx{}tJp5>{@eF6fx-aT2rm9B;B{3(ZtP$3$sZ zuh#xZ%*z0F&%4DVK0q~6vttr9I@#(psi~ptH`aD8#0|cIv+|P{gbItpV&s7WZ@gS! zPPQG=u@lqT8KF?NY(2&(T%O>^CwA~rqNe#^tJ$(MFt6%-48#dH_Tz;61xM(oPCLFw zGkUfoNX9y+xqyHWkQ$*#dSYR=?5#%y3#?dK6%vJ9mvqa9=ub3b|iCG8NWwR?B@j=#$G&H??3k$w^$DNa5z?wrKeyydswXo=( zvlIdNx1*KN%^@5Rq+rF#e2k>0{9CxThqRiEgG!ofm)5f@2+uawzI~GM{nn{zM#&zKeRaP%SmdNg^+ofwFJ-V;uT{+-#vJ7MDD@>pkD zw?f(%dXh_A82mvy<8Q;h^*s?3GM-o}>LJ5RC{tp8V*paqYTfOP-_;mk#I5mL8v_r@ zb4Fu;@`PA42EHfHiy8ypm*?4y0n9--E*b+*7w{a?82BHa)@zB-S|MAR^ViajBXjoJ zW_eVsZIQ>xYn$Y8{Mtr&9JBUbd6cY8%EP_(9v-QyylZ6Ep+agf4W;h(W(@JQGepTd z0<||&w|ctESqnCF6s7ttcUmIZ?{n*}SO>2WUtNIo**s2GV@j?{&$mc&I5TC}b_Rv*t*Twe+Ib`PRKp_Jv?>SV|H*Wqv#TRxK049?r_#_yk)QnfK# zmi!Ifu}~(gu|u0ZjHuomkCvYcX7=|6Gar{+BeOGjkvF;K5D7=T7{UFwBsyv;7+wvO zt48tk@!ZQ3s#0@EQ{+_5%c&Z?eYG#g(o$sFE-@mjnSit^xyrL66gd++@PSy<}p@0>lB6WVY>=?-9kCLQwW>|RJ$^vya5r|Sy3Z{*c-zkyD_3c*JEZSebg(Rsk# z5+BZ+oN*^6f<&#zy}N9&CYn6W5$$ve1rb(IY^ILK;N=O8`>5gKQ3)f|^ik-UQ6b}p z8_aBH=7MOpSolD3OYzD=Guv)Fe*xL34W4s#DA3TOzVk8EbRa&0RuWH!8ouD9&pkq?jk)fHbp>YRVip90C11QG(;;!)n#GcdHQk?ljvJn6{aCIPF98H}lz(unz zzveR-dfb%Dwze1zhiAk;Z8$tCQQ5REQ7*j>3|tOL;&WyI$Ap>=Fo=%rD&xhPPVYq> zg)a$x|Hyx2(8OAM+CPF`YTqHm2|lVR$*a!D?#Ce<#IYYQe6B6R?RxU!w>2 z#&|p1Y_gPx9| z5BG*dBoaQtepxhJ9Yf_8AZU(MK+WdaJ(jQPrn6`=J+dW6RP$;%4v)(MN;C>~Y+C2t z4dF0TBd5xPVY@?tixNmU``NXonOYWNtA@1LAmL=GWu_SV@zp|oV%D-yIA?*IW{Ub{ zTQP-aC5EVbA*AWMOHzTVjolK1b6MJjsIpVHDn85f%?VKyBZ-dAnXTQAefU1oclCrQh}0&_ z%hkX$!SA=n2_p5VOTg!vN_wve?ygp{_&&15+7V2+k7=Myl$5T;ikDM9g|5Yq1NG}OIV zr47*|keh?0|H0NFto8T0yPg;x``aS>o7L}|w&QD#0n3h1LOsFvRlHm*FVE{2t8}8g zTp=~B(z8gjm;&8l-ZytYW!o9`5WYqhnL1cY09zHS!hv)$Nd1_cqt?YydU&2qXeEGC zB3^hxLIUKvU!swi<$elHCM;ItLuS>5B983bD)B2-1U&6T0iO)64}D?WCsGZ?a6nNe zW&wQ0OlCv~UN99AHyZ4Se;lo9jiss(X4Ef`!)W_z+BU5s>QSjylrOL7gjEC0s)?TH z&W#kU4Q_>`2RFq3q>nrtHu1?@yE4Bv#SX!|ZB+%sbX$Y$wh(n=Ddv?}77`dy^C?dG z98HOHL|AJfG3)P9Lco~bK(uqxUg7J}^dwI-HA&fW2c}6xq3N~hOUrB1lTa2>YsWl+s0iQdnx 
zt8{6t$>cvRlb&VYHW$%m99|={QFQ2W_?~yi>qZ@fR)C@asf?m+06FSed^UksJPUhiI=Sp63^ay#$ zj~m(P=YJ$SUHaocu))*ki|!+M=tSgYul75eP)J^`qLgXc7$45rluyO^!EXKSMZI6s zWs~hzL$t~aR&+llE!%UUem1bTr1bSU^bZxeTqszv`bPtLghTy~RWGNW@d4b8?x<7t zPxtE=_j?yuV%A0>L8T&giBgpq8CmI6zwi>d?10QUREF)kT2cZ{d+nIEUVBE^c*f;f z=`_<(CxT0b$r1KMM)vB+@X8_TTHS_FLG^oSLsGg87QHl;8`E_ckPZ9;*XZsZ=JkO4 zifYbz#m`tHL?c_)^iwVJ6zdb!TTZw6vyjG6IBgcFW#|MFRN_zzagX#FZ^%}Klhq*U za3U%sDv&LE;33i0y$*IxWul~NN5^C@JK<_ihIl-JSg{vWs_%nXu}=zHNg)K^KpkW% z*{p&10HMb0Q+zEK3JL7_Pt!oLHP~_8bgJ&u+ve(WVP)b6*MX?9Q~{}#Kpaa=#;`Zp zqh4VRNWx5C?Gst#9A?V8_fbamXhEWs!*wbB(lNQBz~-o!D{@i!J>75a9>hNDebqc3 z>06&G#DZ$aep$P#vdMRl=k{6M&;~ht@Pf>Ym`K#$YS29;=5BB3zy?no!TZ3S1GQ%| z`Od-2Br;u`py0ULfQ18JfKNJ-?|hyaOSaR_9!R$E?ho~OW;U>nlFYOZm>h$#(l@#EW8>MvS9Q6v#u(3tC zYoXbF623iWE~PT1WGyDku7?1+N1{hwxeLTpN0)&b{#nEZb7pz!^#kr{k;eM7|Z0csYTK#Ul(j;j65{qJ$> z{@G244i*d~`#Nq+}dIDo^agiH5>9JI)mr)DC@Ef zJ5J)=wT4q?rfM+_)Id{e6v~26OQK>kSWOK_s>YYXs$hQ?3&S35VUTT>BJjTEWjgy4 z{FmH4sVgX0diQUoBWVx9Q93~Y;VrdF=!TuYQlL&xnW*2=(<V?SoR&h zx7>I`j6pGG{z+41n|}iv=r>aNa%i)e~QG%F^v+DeV>Qfu_P6T?B=UK^}bA3 zzv+FMqkhu+GEcSizL;uR?@OJU-}|ye)%U!l8f3^<^}e@)12u_vnV%dAKs6|MRn)_D z?H)1@ftP>k%xW?jbjZewEyw2Rf41zD??c5)e>9GH;T#uEyem&fCpf@nEU~t7ZiUZi zD&JY#MW;HFRz;B%tkQa^VQXKUX;CAj)<*RQzGYCTcB(;S(8#e?X*DG&aiqeYgBB^m zYTu+*0vL7fBm>1)diwWpCNIhUYd z9LqxVa@JZ_{h;KX#1y&J-boZo{nD=Yx_vCJPV)Nbe@EM>O7A{LAOA|Ys$S0wRwwI0 z?6OL4BwH*P$1*ZsZe))Nq8Znt9@<2e)xK4cfv(n&_MEkQ6j4&sCWL08;}f(E4Bu3W?wrsKR!GwGQJz6SrM|#Z@(#vO~-iV6jkZaOKP~99e(e0QHW@p^7yr$)bAU5mJk0#n>s0XCdR(!M!kk4i2Z;=EFxeoV=P<-SLrl<)ZP;q zBMS_9Eq~pKX`LgU(cPG^62WZQt7)bJu^|+=TMk>N?)G#{@;XIoy&FdNn%%C%@7sdw zULtw2WsgxwRuHlgJHOY>Z1e73S(ORVF`9vEy@~Q{*)3EkDDY|B5w8Fzd)vTXu&7E z8X?=Eu8u?O$f@N>{x*uMOUST|*&;Q~mt=?uuk}i*4x)nw45I;Yh&9==*?=0e{zs%1 zk*z|6ma)jNV~dl5m36*U3-C=$+&ET4L=$Q!tb~t}36qYASVsHeSrMc8Fpb1!kBlbO zY`1K`C0jjA*L-px1$NWQm8ogK^%&2Vy+fZ>8?8gxvTjSjK_+7!$z)S{35zL7exB7v z_gkJ5>XsJthy-@kcZL5v>=*nIJFf`x#gTnNA607b&FGmOrhUN?YWd!U!6csRF!xR!wX25fLK3XjrzF z;2>+}hajX?{Wr0XTw>L*?2yjHJXCkaW>~9bxB-hUeO3hRH$5xB+2TI({8NF4ecnzqL;`l;4Q(W`Fs_gd;8JU_6d zAIvG5PUh6ok6JXTD%Mx&^r)rBx*T{zOs9GeDw-`@MIoe_wUP~LDxgwnvhw)JpT)Aa z<2PIIo#f%t#ltXt$FRR)y$V~AHdnXs)W##_#SX{f*qSqW;p@qKtvV2~UQOSL&AKhR zUBxdx(B_PCCrs3+>AEaEV#uXw*Y2P4)kdts>jaBAo6~hYWzMt9`1+Nx`9KW2xe!r~{s>)}xW_!W z9~bk2Nm1j8UACzo&EAOY%9N@9LJeo!6(VUYAFb1z?koK~zzVY3wYfQ4HZo-|)a{v4 zrSDJG_cp`G@?#YFL;jOK&)0G?QzXhv>7QwcbNeeb1sI+!D{~%U8(|%^+A>0 zNS|NU@^@*V9jEm|ZPTOA;ETpjFyDQ%ZM~-IK-l88B4RBL15m;r2`s3P%PFRK_1#{P zl?y9ksfAwtmGjTn4ViF60_P;=#HtsRC+hkX9}9d^d`$s!svck|nyE=FfG+dk?h|$# z+>v~{gjGX?LfEKzb>*dyt(W9_!qlbZ;?uL%b?>I0_;cq*Qk9wS%Te+OPTRr9t~E{G@k`L zgn{$b-m!h56Btj}VV?3Tkk*{g8a}u!oZOQGmQ&3g0BfeMB18%iFl=$JmOv@+;bj)^ zax_AN_Xb8O^C1~%(@YRzihF00kEzgtS}$$N`P9T6AZy`k{GsCe2>oyK^PCW~qvuBs zRnCo6pOcvVl?k$P2cvXc3cIU)RdBf*#0SipjwEDo-3(X*v) z5=7+ZDbTFv=`$F=ngiuyp8liHJbkOrJiW_jJx}6u>j&fi?KB1R)1*PoEu%_ni_9AP z4S~R?#CMpRu5TSZU3FqHnl5AvHn0>Xf+i>48`0zXhTqxRJAuR;G}N?YN8(s@7efuF zXAXzd$@y)oy?YxogDohQx)429*zBHIYT4cl=h>&3b31FyGcIO;k(JA)sC`gLlT+uc z@$sTlBB7w`v{GOD0tpUZwuf^GW^kD@nrwxAkotoNRXPx7rb_&)`?sJMU6gX$)IfNr z)U+cxbRkP_ElV^>OVQ+JNNGY5eHpV3aGpnu z6;s_OoDsA-y8oz`M9nmNta+LJ&568ddzxDnvI!bIm+CDzP(x~f=Bb`Zv{82K(JGRbvwA|C@#A>BD zBRQlY$66TR2Qs+qhEQPq>4}5d5+pZH6n-Ek&!Af5OHa%NhOkEG$r?q(>wCdMZF-3_ zR``YZsE=X&VJ{FD|8bldAPem-P_(U^x3BIXTA5U*)W^By) zW4g&dQagCbjFyR)6W^)J zJkW5((@Vi;bV0IG$Hkn>J>>j`9ILbY4d}fDlp$;T^akS7Ou|(Ao~;#Sq!p@&G7=K` z(sD2M<&_cnQxCNS+QS#0b9w;7M*;3E9Mw`V_U!RzE*@wa&$n_fUi$)g+*Wl!?^))i zGx0{Gr6AmJ=)wdcG?TGdZHt%HS`r2$On$36M2FqPg*n0v>TIF!4TsKLToTQ-_M~$` 
zVYsEk9D~Y+!IjhdtLrFxU{~!4AH`1jfICdE*ay_*H;W!5ge|1iBT1R^Qzt*uF9doYVWC6+O6HkJqRXkVo56YV8;@`H&%ET_Ui1O_5hk8am z9M~U~XsVi3eFlBL`VOm(K_B;(&$%%V`^tx&A*ZKF|0U`oRvf4BnBV4BIjdyUyx35E zR^p_VFrKf35tz0sexXptfea;6IsUAKJ8(roTrqI=BFedX7n;buE`Y(sZrmCrh5v!K zN={J(;18@qOg`@{mk>$pgGO_f#myaG{wT_ghdb8Hwj+fIJlNjGyvpX-iw^J^^=s0X z5T8KYldk^*>i0wKT@=B>IHqCQE{EOeJaTl|n&vWFb;WEC21=_T13{gWj-@vnR zy=_~=q7=<@ykFl&T9mm=MI|IZ zg+-lU`jy&PD7WosE47)rTx3c0<5Ofb2A(jlI$)=*Mo~udo-qB=F}WO%x(-K1p+Qg) z=^L+k_r*+=$XhmyePg3TRUhd~C-ai<38BC(6>(Q+fKu2{Ub6sIN}YhtjIhsDz6rrBq2f*4`dP6 z3<;k)XE0^!w+Rj8Ubv-NW@aoW^v zSM3l(g)}T;&MuTVE#}03-%ld#E8-yC=(qyG4Z}rm<078N{c~7q(8otK8F62ILY_XZ zV*W7JpgM$CyJfSbr!{2DKE7M;reWE#8dA(x0pR0A{Ec7%rZ`XAInbbha)w_*tAMXg zeo*DPR*6HU^Bp-mCsZY@w{JwKRvF7NTxq1Q zF_PPiyeiR22b6)%NIJIr}EZz2p*L|FfABdTx(()X{TyD5AK1(2u{l5WH z0c2*R0O$|^8|Lscxfj46a>AQs4HEf(i0x7CJ^0{gEZ2(HhihZKs2|0Bsc!x;j zs@;S3%s`+6eWpWPjfOf$A8C;bw{weL+X6y3zqPTEGvN^A9UYd+!a5j6-mjE(A02wcxA*>X*OnM6)aVhsS~7)*+mI1JofL_WS8vd`H+}OeCV5G{o0Xy>E`(iP6RK#DBrjTV`DmF>9l;0jX_BIk-&pwc-(;k{+vUD#~W^mIo{6F^+_$ITKql7t30>{5Xv=f~eJi=nrf)KvU!iY#d&GP!Klc5Gw(S<=*`AkPFhO4YlpEralyLP4`|0edYUhkdPvhoyNb0^s8Mt2E?9eF zs+qRiVJ}q59^oRz=0{~hWOHaqU;F4~e_~35n33=2UxiYLR=_D*zFh;_ohqGsybk*{ zWGo%xwUCu#A5C9~3Gf|mEy(!;z<;lAYo8DkcMTvlYk@9)jG0^`5}`u^tbQusXKvJ{ zb>-!hCp`C;Ho2Fr8$2<7k$8wl*^Hx57{_pk--XA35(qFQ1TS}ME`UOLpIwy^!yYvC zvs)fW%hC=r1(DEhvK}9qnz>s?QGQs_Fu^?v1SMiP8On##L#p(xlX;}wFAkMQ>3*4s zl>24LOqKqXhgyn_1Y+JsI%c*0*(r|Qth$%o)8T%WO#32!&hUv3yGjfy>9QY6eJqtp z!u~ePE`-`3S|B&-6T|l5i4b)8sUZ{;y3CA0jjLLP7Rx+Uic@6>G#sDsAtAz7Fc-1( z=mkI>vGzx;_32xk)d8akH@+iM_!+WGC2{KR1)0XI(u7?Z z;Gx8+sQNB5^$Q`UsafT7Q?o{x>E-aP_BD_k2TZE_Rf@~f@kz)*Rj!Tod^FRSmG_7% z3#PLvP}>Vq%k%9S%|FzoV*XPDGyJE<#|Ex(#!oYz3BwqM1h*|7HfCM=E@ygJMMLa@ za#!IO=9pI!pTb590>>qYQMu&!HD6M{!Gq0egE?kP;xp?q{_NauSora?lHIPjFWFrh z9~#_(kDsFmqrl?Kds$+H-MeFwK&)^FB~eV`3=(GQUKwYmz{;d<_jDh_{8f)U%q>ku z^XK#=$6_ykg5n+NMp zY|LkgKY~OcZ2VXc_Vnb(uJ{Sbk4xjF!7mbwb7TV3tc-KyjgnxTBd?MK;~Y7WL~P6( zxhdt|wh}#&m!`*`Vmuo;_R!o^ftkLtK=6r8>?QRQs(%K7?Iksqln4Yd7%HshKc~tV ziBw?Y^Y2zuHohFYp7E8x=@LSpKwFf*06^hG@`cXJ?5RT|Ii3^gq+I4&W#%mFAw z2l>!=)^$a52SFm7vsn#pL~T7<(vr23S#9;>Z?N3E7GEBZtgd41k~}G1ZM7xU8S!yv7LXGk1BZ#9 zDnBPD4;5yPx7yA;Px6mBvwkQK^UR?h9&Vvaa*b4Wbcl{9^aomw|4Zt1%g;aM=S}(f zoBX^YKikec?<5Lsl)4?sw>=p=DA=Q~PB)epK!5R^AHtirO9L4;e!Q+K3w+`Dl3q~1 zV}m-^26eVHdae9S6Hqy-V}m-`1~sHX&5X?hS1Bd?4v*Inl6S2947Gt91Pt*aW9`0T zwg*T0BYv}{X@}f=ggd=8X4G`@Z!71x&?gE~0l6p2#DOOI6!#r$lG*ak1}o!k*FM+0 zEOOMjkPvY*6sW??t0f7ERTbr&IR3fSmGP0e!y$94<27OnPTy98EVZFvZgs+uct?0m zz#Nm2CzjU@1?KiI03m{RAZ~jHuvj6Q{1QSNtKW-31|@TlHjO;~9k$00)E}5h`*?^B zbvX-Ok$F=$OC!2jY0qv=q)+IXx`Nhd2g}x0^~F0>7)TeNk}xr7i;1U>wsTQftk(E?isQ2F%1yC%i?3J(_eHy5)K1Yv&!&f+e zpZEuRSEQ2f>0EtB_;54$`NARQm^U#iacc>u`_#@Wdw{GIAOo_qLOBad9y;R7uhZM= zC|irK?=6i!nWKpHmcC&WqxX^8V4HoEE|xy?akiy(QPg_)ix3jwDq+Nfx#`Pjp2RNi z#>@4y{}0Frk7c=!xar4Fq&fwx$5;7 zFylCw;zh3`*WT!#0~a{&jcTXkuL6$y=&NiB1 zJ|b;X?sL7@TLE7Q70%Ta;#2A1+jNjyCR#wzG*0|f_EA0k7a3@<2UNz2^u#)=1SQ%D zK>A$LAgucm`c(JH$%=d&7Od_dV2e*nqTzzzG%*UXT+vu8`QC)4atU^ilzakr45VT*@) zUd;0vZjpP+r70ouG^Ni7WJiBAPvRxAqrb>K-_=jzI;4HMw1IECbKgGEPobCn_f1{Q zDie5!Kan%i2%p{2uKbgq-ugmjRbYQYW8f_XhRM-4y`;)ath^*vhrC*3ES3d*{m; zOe7_?=3StlrJnmUV8U;K<{uQ$jsW4{qXZZq!2CX`fq94r!2Fa)+ALIm!Yo6Hd<*$? 
z%3t$1o6|neA;xOA=|-dj2_WN-GEKgj29l@nk1Gl=e~8H789D+`wrs~u+VGECKkmQ5XtF)$5vEw^#{IHv>dVdaeAf#OAL+t35aDA1 z$isBai0>Iu1}+gZO(1b2UXg!{n~Y zwEJ|8(Jf}%;iBZ8i@8?6iz@|N#h{joY_Di3S{d0Mz)5N1B^LZ9KXxX5EjpekYKVGX zL_N%=3t7>3+q#~%b$#vl<)W7ZcsI;?4mwPDvP`D3O%Ct19pcZ1jo4^f<2#Wg-mE|= z;g8vI_;+AK?9D)fgwAml;hTX*So8t@MrJog6{4lqaIaxDQ?drPMKg8AkZ7(>937plu3WkUi6KhJ6sOO z0s`xR#9sD2_XOr+(a{UVMZ@JDR|>Ibeud(y|C*vtm~iDvyVnf9k$8`vILh;z@zcmU zWG3&HNWWb1VxYg13K*)qCg%{v6Z#MNb8Yk~UH5gv^s{$rhl~E3LDr;0(y_F3cD^O9 zBzeCd=skO?7YD$uE@Byg3#tRqR`qezCN&a1t9#E8sh-;G$5iPRefPUPAN2jgPHj+d z-^vBzmcGxz8hM%!j;dW8Af>=Q3J9OJ=OW)^KX@p$#BQd{FU_oD|A=H(HS!V zo7RK?c?_woX4MfhHDNs;sP>~=<6nUqbNp_UzwRH(vJxKF<9}d(9me@koP+x*O6B3c zPj;||8?*O?Uul{%q(bH-xYEHnQ7`92J@A$QXGP0-hBz8;P zr`eAR9(g-9HVI_szG5DRb-8BmU;|B%t)8yw$`57rc7|aUelae2B z@n!OZqWA#*tf$d@51=Kx3yh|xb;4yd33W+!4>085ABiHP3Biq^a%cR`s%*E|3Dg|K z7`fS-tuzu@t&i@xut)dINk?p)(jg03w{3691)rG#)ze+LiMeUU47SPNtCB{Bs{eYe z$9r;+otSydBp#w>+yOlr_*Qzt2xu0Hbb7)dNw5bd43$JNiBH&{HB}#pb!=cbU0bMT zY}ZTd0?8Q)CE+i$<)*A^uetLjlgj~&CZTLd=|)pGiSS>%0~qAzgBG6`Hh%s0g=g1V zcd!ERB1V%pd-&i)86sDtjCvQ$VFXNqhH^cd{(lo^mLkZR-ZPa##($1v4Ir&gU; z^tcXn^Cid&1MT#6q-6lx^#C^2rta`ajdkiusSyKa)0=r(I`E_~D^&N>yxq~-=fIZm z&)pbtxNT+~c09IF)i!;&`$cpe_%d->rH}EUhxt_ zAGkMp=`b&JCNtEUG;CFyDqxn~dz1CUz7Qi=e+fV{B0ZmC6Y%CnJVyuU15R?nDVT2w z#s8WZ{h?fv*c+qI;S{;hVgtkF&wh==|K6&~A=R)zefNgXhGh`2V8Gs_eglqxx$RUf zVf>>CB>k`9{D0?0v9E;lcj`>S`MDHLumDL1!E?kZ{wHU5HL$QxzfQhIzO&rNd0}y{ zzaZQOqFSu|Xqlp5=Q0nG5bno|AQ53UaeqXv+Qyq%J@PHmiL-Tq)P&7a z99DmBzGS!SNU$|LXw z_~RiacipueDT<}eEkp_yzMU>Eqbq}AHC$>Ykckx+0 z2NOZ**hHGIwp>VY^xebP2tBjYpGjK&v*jDD%pcyfcsz8#)}-y&_LmHqllW*^DZfHm zCXG?J^rV5{MXWyAvWNgd_RE=4^)R$KZ%WE_-P68%jE4PhW_3Y)xTU=}&F;Iwd%~ku zVHq*GL)eD42d1?9V5yAvZ&NH+YrlGv?0VC`zMkb?;nDT_)iLZkt@?)8uj%VjyUtho zdJQHFj4O|-_f6}WHFQ8GvOgPxR^i(_$ zRN+ROwNNwBX!$!{n)hoH<%wLF$=-xwOe#0)xiK@?y}*bB7CFUoG^w1kYne;sGzQeJ zbJEx|Xdx4%#qb(nVR=Zq@^`_0BtZZbM8rcr#q!z7t;wvN4SLCC2Edf;0MKlQSfyMd`-p&=9{CiTy`~C2GU2N0W!BI!*I|PsPLcB< zJN`fR-UKens{J3n*%S~QTqDgK6%#{rN;4FhW)yTrA#p2ga}Xg^lnDmKaskl-N0N5y z=_#I8Tcu5@tOhkjGsCpfHnDnga9YSpu~Pou>zwP%43j-QzyJIBy#M$8yzf1qd#?L? zuCrcexzD-Ia+`4wKoFkY7G2tJmeW{&c`AJn^s3*NL|JybsOq9_WUp@Z%Xmr~+o-E}h8}cVj-)4lk>)&@ zuAA_Y=VIL!vGSuK&NZ$~x0d?F7k8=tx>`XnRyU~wPB$dz{K z-n^;_p?GWn?NrsHr61B!y;{S5la%((nzker3q!PSTJbotPMc=S;4NqzDO^G0srQ;Z zb4_efA5KT_(-_npwDjF$9rT{N9$E+wIy@-G@3~(AkW;Xj7FstJ0Iz2?6?CIm3nSO! 
zP_M#H@56am&_Q|-19dUNM?2`gXx zk5VWGct|079~($LNXhC$$#Pn%rvzC-+9-oiZLpMLc97?ZT@wvKyC;e;g|Zw~KA);Q zvR9s}>i1nhyr+a-5L=OYxkvByj=50DX^Ds@6sL~YAcJX-m(=4zhvDtC8{|Ov+z#pz zSND6YGn&!rD=ONMtJIo80r&tJ;d3DuTE>jTbb>6D3s8>5TM1p>kI_TzzLT;`G)xXO zOlXd1VWi0ZJHpn&2YmNQj+@a-|Lbw{^^_eVs^a~tdl6%HZ;w@~a_5-}m3a-+;dAUkxo$ zY#0PX_ovi$Ih)WSM^G6t71pSjQ`%zIk2K^qwv)PCltQhBb8(SNXW|IQGz*3c&>E4H?1&jQ#)!MnA&J%6#H64>8 z zr@#Xg-q+fWW89qO&18V~I7F;xDsSxw-5sHrs8N=y%{X(q6IrP4^~x_GV=`N5(?kqS zD8oCvX-eH<9cw1EYs$!lPaa@R-Fp^il^!W;S%yE=J_n4q%h4Dk6VbzRIhJ95DDPK5 zXq^^WoE6O+v1~fw_Xdm8DI<>a&~Qi>&ez;4+GQvC;9hwNdE=C6@!HtrXCr@zutA=> zYy8aDi02BG>`4?D%BvHj0<3{;fyxN_LuntwRqdTJ7+4Z;o|H6;C2N2w5|TIW%j7eq z-Tfq4uYM&S&u>XV-iAe`VR=8pc+8|$IzC%RI+*U1rfBymysNMc%KZb;idB#;y53VE zTWIphMwt7M;OxTia<|@6@qJLqOXx^ZIYWi=&MRO*HK*lS*hD#lQ!Km+`#c{ZVxe1% zcyVE!VEGM*Q@d+NJ!;J=ygy;@GI>r*4QWjCX8DtS#M*23p4VYkU5^)qCRar| z!?q)cwT;yH_DBD+WSCes`P6J|#l^0rx>Ljkg%{*CI;M9K-X#d)D6`O^}tKilmYZK*;V{DC`>S_JfJJ=Ar69&e9|+i;xD7ZCpD z9}yt>F;E7)=e`@(JiC?BfruS!6>M*XVrJEH4g z8@f_&hsC=$vHUv=(gmf&buAz?j=~4QV1n+m{CXR{3yUO_pbPk+qnEqP=llC_W0W)u z9akC`e4DZM8wU<`-sfI27+Md^bfN=qu5YDEQ)ZAR>#^t$g+&@JgrT*fJy^H81%}pk zeAe#aI1>e|+kDPa^?Y0sJ!7q3)vIv^Jd(O-TO2+FftD5XrXxSJIx*aN*LoU@f3h&) z_j@?L?T8G@NIttHcS`lB;Z`RWyqvQppI)lh^+y5^Lgwc7LzwNwW;Wf`?^#3e>O5ji zZeQ{>j{m@V2`i?)F198d)%8!MF*erbMfIo*?NNziTc!tzCh!FIQNsVFPtYI-)wKnO z2jW3j*$GL&cCLNc^s{XrC9>rPDu4$(_#!Bd8u)T@^|%CdsYUQBq1?y!t9GX+|LBOz z)Ae72NQ>bkI`d7IgoC;*k(I&bN}Mt665BogO;N)N7>fEze6E+GngO?iGo0^dBz&Oj zUqO7rSgM~s%`dUN2 z#Qb0}+Avx!AYwn%My}m^4a3p8f(H2mgFCoU^)=tN#Qs=o?bAvCV$Vt zDPwGtwLsO9@E*zw-bNzxmM5eqe_z}Sb1eHX_IzGs8dlMZ{6s=@6*Lr0cY0dxLB|6I z#l6syAb+c5{wk~RL|5@MtfV{7w&a7-sEdj#eRh^gFzLdnRMS@uMd5X>7uAavPUa`T zynH9sKefHsSD$J8Bia#+dNNQjoS))@=ul&nTtKxEe;<~N>d+`;peex$i_b8{gX3ke zBW+9o`@@n!sW_YS?QP!np$pnFdA_H3;fS^+BjF$|ZK-H)FGQLxWl znSI-<$7jm7pNNckSBU1077-qZLhioq4U#Zkj_O2F*%-?H6sBat?)wHJmC7JJRblMN z$2hE8XdJDQj=_P7{k-+X1Xn6eeZII^zSaA^5ys z=!YjxZ4WBZgKY^yy~L7~qa<8v1dxxlLlI~N4iAHZ$7|?RhKL-+PmO0|ucdFyRA2Gj z3=I4#zCr>1)SB=q9WQ_kZqQx(nYDU6j+iWjqNA!& zQAE0W!ieY~H1q2^h&xZ`Xvouj;v$Xb2sGrnZB4Xf3RkAW3@JtqN&!#9|8 zckebk8xrt2@&?@<9}s8#?WH2(``eGf+cQDrcTfw>^AZdu7~DerDS{ycLt3aaK@gu9 zM5zWOPMa%+l9W z6ZSxpt6REkymS4X^?t(pEy$CLsQeb}xrs=KNab&#o=iev9xIVfp7BIFdyE)4C{HYrXiqmHT|FeB-$W`u)LpX|WtaPd7%nK z327HiqAs{f4#|dw6DO8rU~v$t0*PWrUd@{4Q5=2Hf}Kro)?`!2KNVmvO(2+l6~K?)|tw!u>h!uW{1~a%7%RF^TrrgbGW`Y0wDw zbQf*)wx#d9h5@b1W1|-MI4!F^Nw0JCg7of2Osi45fa%g1?etA4=$+b8YkpfAi8ks~ zYWRhrB|qSWYC%^!|OsQ3!J4~di!?w=d|`h9Q@K5%Q2c@*@|ODLeL4Dh$R)R&1CE`vkq+p4ppdcMQe{sXQ^mzb;qNpXh}e9XtV>dE!rKjZy$!>J>0|b z!8^Ik*Acl}`vOXH=s__`{mfy-6a;#g=oep#!3!cP7GWc{LI)_+?&w;NMl3M|y(ryT zU51#XQeix3c4Dz_s0n4U4Mz%ohBeXfubZWMN^AKT3Zf*;J#h=oF6+g2S<=FJA2Q8^ zX2e@YzVF^T2oZ5Y!cIuo2?mg+1iO~WaG$f1x_ryV&YQ$*k7GduU6 zCyT~l%>Yy@Ibst~F?Ftyf)Kp!pwmI`r?1O02ek!_S5T^>l%=h0c1&}o z*~zp3rpc|k4LANLKT|P*EhdC1elEFC>Rk zq-5Ft79|QFty+4;UzO`j^i@Q;UQPnT{N4L-vAeS<+r8Y=zmn3T^-I3ey&I()HXn!0 zzZ8Q*Mp;a`^%T*C4>uvvbbhpy88+TSW)LlUSiQY+b8mScbsb47>EjuW)W-Xg{V;Rz zA;{{fsv-<`r@Vqe=MXv<601s4N4%^s_du{lp7KPZ9mL$qpQ=y@*iM``nSrm7zwchF zCn2H^I8g_jr~}oxF{-aCRA=6PkvKOUZ5EnMtWm@UAuRo%6GEJC!M`hj{NC1e}D zIjR)VZ=66(V1SwX4aq!PkNp$qded5bbqP7J-ax%yAmY95LCwesp`GaLei}h}x?!x_ zyd=?ms*b{qLfSg?^W9%T1&VTUl-kHGAB$%UeWOPr8Z^{$?xyMfXvBod-vQ4~83Q{0 zU#yuTjcGshf42tZf!`x9Ik=>&To$oCSajt74VG?eWI}^jg%s|$*53E+4thO0ri}ay z``~t1+S}j$8nLVnz1Uno3l9%s0G_b@K$|7Uo$mZKBjnT+=e?&hM`6vw(q3YjLSl7B zyUx1b`BQa9kj{D}`OB3%WD-$~ymQGIA5cM>vtvrmn$uZuz1CHF#FB#kzgBzz&7Ik~ zW_K%=@OpNszIO**ll5=G3p3#S5TRz=EMm z;Fnmt`?ICt2dqRN=R%mLPNlXb1OerO-skVnBHXNt+;Py*Mn8+MS9s=Sq6H 
zPd+c~y1ROvd#*+bE>DiOqbe6)YRRG9H1qH_!b_UgJ~p_Kf}VRwYNt}0kOFiWQk&vE z7rCEiuXccPOk$q%YpSU1(fHIm)v6Z@5b4YlIv}QomN@2wqg6&ov}7OQe)n(41X)$x zQ)H72aF9C%t(sVNzYQ5N@-q~Ie(k+tuRx#cuzLVPM@ch!BT?~QJLY5Q%=r$g)O|FU z2stDnzbk$_2R(HcuMB8@M388y#`^*< z!c5#*w08xsAX?UlSA$+GdZd?%#eEi$K)>@bNQDLT<1<2cij1M7UuoT&|EYl0`On~- z1nU0c`-7+97>l;C>%$q=^ALUiX16r|$vsi+* z2w%I6etZJLX}&5QWpA<*i-G?Yq)m3FwxPq5b{wCF&IrLlg1L{j_qq*2tV-KTih@~B zo0D#hFNw0^^|0n2q@@PYGH7IYr5oG{(`jeLI~P z*xq2puJPsHL+h)%6Lm?O;#~6`*%`0flHO*VsC)Z~0j3ABFQ|Q|*|{@edsW)zbZ6S5 z=?S~ibMX13M%{*L^dHfRA?uzRPeJwMpzb|P-mD9*OpV6My9OL#Volz^{6{FgR`$b? z1z)Jf@pe$(eeC|BAHB?GJq<5%;05~4i12TSh+9Oqa(R$Zxc+$garC@#EJqshC+!m_ z7Vi8Js=L?K?n-qwV^oliQh{QS0UNXENVa?2Pkcf3q77X!>Nz{pHsgrToJLE?Pc2wt z2rX-;t95TbuNUdV+bP|dfrRbHf#mr9oo>SkSV4H3(c?er++A@-1=Z>o)Mf;AR0K7~ z8&o)gvWEPc=KMC@xoCIGMhG>%x?h#Ky?v^4jfavnVO@G#%BhA>$AAgzQrnD`X`Lw_ z!^@8SKA{pExn2c6clKDZt-Pz%IUYUp@VxP3##vWfmR`LiC_TC4QC-DyL@>j-m#t%c z1SR>*VU$2haXGjc&3XFLaLV~HEDHV+#m}bXp#-2&O`<>X(b_OX7V_1J8kOeUhci*` zK|eZUtJ}PT&OH^y1g8IStRS6gy$o zJ0TzA7@X8GB@gIUqqW1~)Egp8jP=oY%&SV<;Pa^3gp;UeZ4ka>vM(>eR>g`3u$l@w z6s;Z0ud&{0MyN%I;9(evqT@tu$au5mJM;36f4v`y-shwf#KEM;TL~G(q;H2lEGkONQ56zg*5$$Ajd^0>m zBW?}BG(oGy8QOqkU?;buDV`dU&Q+6eYUaDe4jTEHEXltX$F1vx{2v!t645>>d{;;` z-sR%kl$Po>aaSP>%vlwE!fU$F>`J|X*f5xIn3`P^w&G&5HTK~fL`SbXrpFCIlR|MW z5JsK97Tr%`I58Yzz^iMw#kpR@oJ5gADX4xpj?##pI^ItUF*2m{MR3CI`Q)&Vq9f$6 zdjH?aK@Tm--^)Q)u?897mBnz~)=pRx9Gq9t7KyJkO$rar+Z_^(PT7uNEa(!;wjdQF zOXdCZN(E;%39(>77sL|s8N}1xYaNOKdftJuxt(OQKk+=W8G!D|edx#D5Et)0r5~S6mlRn0W)Z5a|WWE!G7Nm^utm18u4;S*o~@)nCn*4tON=C#$nz%xjv4>Lxr)f zn-n++5`_^3aiNI{qyL!~SQdrFRn(KY#!pEHt4GqXMKR~6igV{m63x5JNkQg{>`@w? zXziYP7(IsEuK%Fh_EGGTwo2uwwjrX|W>X^rO^#|15o zK-^B#!may}x-&_7IxHT_LM%%Pc86kchiAOio81NIONlM)KYl7-S2(~3y`RwL@k!}z zNxEvRdDfDq#FuoVp||HQj4P>!z8#judh0QjNw83&Q!d8n)_|o|WwhcA);h%*)jZjm zekQ)X*Tw|t{cGUr-p**GHRiYDnU)GgV3J?AHWMCQT^)y&1S^^-Ba?s8Ra_1;C<_mn zQR{SDv3xYG(S0@Qi*9R1V;(lUjWMIOO06n6lvi;kvZUYZ^e%NL!h(1r8yA?{caa6W z>riVmlK&`rr3GaJd%Le0ig>0qiEnV?fK)14dgvd~J!mpJKXM;M{^HYOnzJ;#g_JNz z^}ZvN0q;{Dpb2i#wH1!i{s(?fCn?659s?CqwaX4kb z^K?hEVZX!do{na}v|%B_a_WOS)AwR`Qihn2_}DT4ef%`UQhY({FwRA|l;nz+SFhXS zMS7eo4kOxWP54+E&g-rYy8w|!N4*(CDLpI};1MmvF#6K`1px)VYQiWnHB8@4a~^aL zK)GwFI7{cYc1W`r!0tJf+L&Z=gq0clIQmmfVZSayimCFn(C5&dY2PI5=FcSO`(E>O z=qia@sK*Rw+w_|FO1%5@Mhwi}M2Vzx>Ee?6{R27WSZv9`(%`TM@Ql|vX&6g_J+qu9 z+O|ZS*)D`4=vKjuI^(_%O)o9(4Q+><;3``(u)dder93x;{D@9hwiA#-SQuPZ*7BU5 z?_m5y9S&?Orjbhw4TjY>o@gu_?T&;^IWF^zM>{w?Il2JVy7&@|fw7d$_6bGRHwKQv zGN91yx-F1;MYGA%-TmG_m2-FO!y+u`;~>|Hf#hz7rD6cm)Y!gjJleGJVkz?u=wfth zXl;^QLq!8PD-LIkejdFEoG#W65yEk;dYC*WO}a=EY_g2rC{4~FRM;`Vxeg)|A3CeW8h zFOw!GA#sQy3nrVTNwYMu!Q@G4vY$*uXreDC^`O<`pl`sUwGW_x#!>Ol3Om?{g-zo> zO7#g`2wg4Z71lRl>;)x;P)G`u&s%8RkikdLlsrkTL-mv-JU)cS>XI-T{7kXC*9(S| z`qmg%u9V1te6ODMyStE}(`f>9Cx}QpE%e-9K2s~~?m^GhP8*)#d5ZL4qeLUvb3z}x z2e9WfuKd{DL!ufW3M!PK(^(7Xc4179=S}qds(h}a=S}jN_9nO=7SG;gf{+BhT|hc% zXIS}QG}%qG=1G)tsox#x!aE%&>}P#$@nAC)$9i0v4 z1ZwNgd=T#oOsm2s`Q>;Ax&pNHi3id4 zL4AeECLDhuwOCP&W%CXsVf{3XcKj91)S$J{{vQY{kCjvL?%$BBcss<2ek*lWlf>*J z>5~lZ4CE1*coAcSgQLr6Ba6rm=#ii)LZU5VHrXIPy@mC%drF3L^Fy5)ERk-t46wLW z$IboB{Zbp{Yhq<%8WjgMpZmE`%$hd4KgV=MjaU%%o;!vLbQ^UtkCnzuu(zm z@@?V^doTq^Jt+~U%P%H45h_uJjrWMTCpv@Nny0v{#Jbq`oULvhUepv+f7Gk6L|v$9Z?t)ddh2L)|CSim2 zlA;r3>s-ljYJUu=)e}NM>B66Y69)y$ z`Wt;f@XnlFiW6?0B?;@{#w7RxxM=3bkZ-+fD)B;%+Zz5V9>aH zYIP_nnH$hMr&cV&OrROvtJ=nK*t|XQ| zKb4B_Px(1-sfHiuS=dsO!@8kuD8AN;2C=$V7@{CLuP#5`@^ zSwieOR$(914RN86UlgXaU{G@T$UV9s zd@kWDy`@VN305x}{Syl`Tgp!;TQS#TP7 zPrNoWp)PX8T0>lESV)c6mMO}b+*H)XX$d!nG~(F*qONA>yenMsX~}zwn(+qi;Gti? 
z2z{dJl!64x5bTdf#PFi&PiOc!X^FFYHT5K56y-FRnD1EsgtAy~kKI1y5QpW`0 zGbTCH#z8GoJ~lIrMyDfPuXjP?oi}E3EEMHPr24)almJKXrc!eR$F5^Jq$Bc;SOo6g zjz3ab!gJb}i9Se%(~McIy*TXD9Sx?)Kd|XYw|%JO=x3On*p$^~KBa{^dYQ57I3vdr zdipRvj*t|yHU%yN+F46g zDStcN>YR;4oy3}W3(k+DrfNSX$NT zUOxKbPiOW0`SRlr{1NOwXV(`#fr zt?BsC`eqPc>AKCI=2>-%cUUXhbh6ws31c=#0$OQTPP z)u1*8>(-vbyJqLtD`H)kq1t1uIMyHCZ@o2Pk2T>yMo5Dd+qB?*NiH02gG1dNcyCZ= z-!J~}o?R7$+pMQQ6aCrnS6&8-L~EJ4m^Oo2${WaAYq?tz5lHctjA{#lKh~Uq%@z%w zuu0B`AF2^j{nXt4_0E5-T(u@bLtU0ndhZkFVc5*ZPpJ`QWGOYGRuY#WLCx=;Tcy3* zwp8YTlK;DBk0buaDgMVP{>Lf)$2tDTIsV7XW&Dqq|Igz8s$cw999QwDCmDZwlJTb} z8UGc>Mf`~%{zUxauY0jgCPBp?S##kQ2^ipo>kef07xml*c=9CWk zmxoe=+l;l!$VB=x(4UF^uxuFFNq=_a897FtNzz87csue8fgFRnpYkk}^6aoT&7v?y zlu1$}Nv$MxlDH&klB8LXpjH%Q-}y~}^)(P*dkw93q19lGKRlnEHV?reI(ATP8D`m#&+*$C|uHw|XkZixnw#uZX7PB;NMaBIr?uE};i4 zR$JOHz1j+;6r4=jo1VOvnwBfx7R4|JP0PFI3WHJk3JZdpxh>J%z5@kE7SHoGHoDs% zg9Y|pcSj0mpSQh)myx)q>+yG;34e1@Uspxoo`x#CYBs9ys%#s~*OkE^(7S6TeaI#0 zW6ea*uBs}N^sZV-A9P9juVzqgqAcax|J+jPDoWANoTs%iiDJdARpI(bjoYL3s@;N_ ztoJKdt>+eCCCp?CKxX^|Gn>~fKNY4&mQg#FE*du=G!BG_0EsptT{aa3eob8sB=Bc> z9f!P*le~_TypI1j@;c7)I?nPs&hk3`zb~(O=aturwNEdTIWiy%qjXPQ&jaKBZ)*KogP7iN|@vk0TNAidIS+FZ<0p@ zs7WFsK+O^fQozl*xBlv4x?|S@dQG~R)NZZV8X04)aL1f#&^cluJX%VwivcgRn_1T_ z0!w4l6I@VRSaC{&;}+e}AWv_RJHGytZm2m%H#9={ri)4M){M>DBnz0^bwjN-V&EfL z>D^4$_B}Angdx7`lG+Vl(n{~9hlv3udNPU8#YA;0d69gyRy42D#W%>1Lmhqm!s~`L zzAdmC+6^9-w8Ey&5u$_RZhj~J`s^dI(3CnBoNl1eBDR%n2^t$3S#l#Sk7{ppOve_Q zt1XgQ^0oENGQO;Dw4ak#b!g<^b{#gjwBc{5%i_ehB;mnU%)W#}bm2>M8PsZ@Zn9#K zvv!B$1B}FI?S4h$dE;Z6g^?GAyC>FVKrggYe`CuV%;E3 zkoMBUN~wMT0c;=@U?4zAocpd+8b-kve7^`Bc$yXNUQUKpS@G`AiV(fsX?o-g9TbQp z#?$FTh>_Hri6~jz>CzKBJh5BkRv3$O4!)@<{jtGg3&j>AKet|*g6Nuu&I5k9|@zG<*m+o9!CQN6Q(s7Xa*Wp08&#HyTuA+%L=_lEA0PCmU-Ydrs6h;bY%H$ zQk>oKAVe8~2O-8-Qp~y+V3`?8a+jBbNGa<^+gszloFeMXAj#<6gC!^hzb2@J2wh#ToD zKSN~%s)(vIz06B}fH(Qdo1_ljh_C!e|ND1jP>q$0PQ(zGw?04#E~<~`Hvc#&(~mls zmaVX3tB^d#^i6R7BnL{QHmUHbaXgQ0ZWbL7gGGCBaSSP>FFC7fHz)G;!s5yFI1xwsNQg3xUuCvefk#H#Y~4Y9m~|h^cto%rlm}4n7+t#7t>~@txSX0$Z&cyy_9JZ z({!fSF|{*Y&2%HvSD7|4{hVnl)2OvFoLHuDObtv^na*UI$JEYrInzp}4>5h7>2{`v znVw`Cbvw(Ese!49>2#)ZncA7IW_l;nhnYUl)Wvik)32ER!Ze)owKr3dnfRNj*UBhH zikd!=7;6=TxuVK`*=N#R;XvB@n&gMyC#n75`R#xAPia1{Nz(ZH&u=f#|2g(5yv+Fz&?sx&Xc_05=8TX2vSMZpJEpZH&9H|KY#O^s4Z?GiJ8dlQBlx;?gr#{>L+x zrB>61F~(qAT!sKVD!_hH0DoKnHU(e{W0gNM1Nd2tReWv;;M)T5ya4<80k|Ll+ZoeD z3SEu>erW(+9)QajEBUMm;8zCl*9Y)x0{Hg^@HYnFhXZhJfcqx`@TLI#d;qQsz^?}2 z`T)F*v67c70PhU2-xz@R1>h#es=PJ_;8w<}e6}%GCbaRa|<;if>@to%t5V z7ctIfjLwR^|nKj1MLS#ARTtXPm{jKjTuymoTmkaPMNQ+LtEA zO8$o!qthj><^cPxj8*)4o|ExY?UA0blD~m*SN6}uSf$6tSjn%9@nE*UiE$j`rT~5$ zV=N^VSA2U%d080inQv!2knu*w@r)Z8U&^?RabL!e7jk%k{%3J{R_$A0`xIEe1KWer0QZ6AznR0Q2}ru;%$b{A zoI6j64>)IDZcYJcv149Nc0qx(2JFb6U+goOGbb;**x`?tF4DZXqYf}emp5%&n(gK-<-XdFDa1kcNH--O$?B+rpOw;*>2+|p%RGB*H|#D{1$$1;0v zVV<(4*$N85&=)(h9rJVaHW$0oB=iVF?`ZP@1J z=4F=@IP^J%iU zzsOMnNz9vHY%j>p$z7DY*r8vPZCB~c&DZ};RHWQ}_I$F^`=Vs?@&99#Y&nJYWq&0! 
zN_WXThrTG+QBt&6WRoq;qS>a;oM{!Y0AJ>-w^Ve*=l7V(zbe-l<8@5H+i?eUeo%eS_4YYS&p;CB0x&BFVKIwO`r{o_X zXMfwUKX2LM>_w;=^B2!|%ty;{Yi^M~udqn3%7x-*J910sJN$~jzb*LVEj<4*IIq3` zDsq(WKgscL{O?fT9r;DM+4KCWy{6hT{lI~Gc<;kGg>$l(X3ytl)N4L2g<3JRoiov{ z=u>lxbBgBMea(ZWA5YE3go5niVn41v+o6}Q5`VtAs3?0G%axs2=A)_?<>W8(b1ZD= zCloHSQwy!1I(GryDEf4?V}9Y!s7)n6iw zt)KD#eUA%ezvG6hWc+?flXTYNg^LT9F4pIk=H%KP^9vVqj7tB){#<0Mzg-^eu2Qu4 zmLf-Gx+B~fQKTVrZSd@nPpIKCxlUrPTe*dH|s4M(H{{L0Dn@WN-Swl39?0r77V|k6A z)e&n|5<8goWWJ)Q*^08KPuHl=L6O zqq44e|jaxDEKo5H3t zL;ftnt;cQ0eG%>=+!WU`+!WVxrmI28yaG4*vj#W0M?KPp;(mnjzd$LTPvE9-p2kh~ zuq;Z%Yd*$^Q0Sz{p-l21}6+5T9VW--lYx7?scEgyJ-cD01s zLez~!ZHh?0k3UnR^!s0?&N7@brY@#!Oby|ZZ)fUas*hlErgcnPnHnOQ&$N!I)7QzjMkO)bWOwZR!$~dX~RB`O+uLfWIQnv;4Y_i79wW)`MTu%?2g zkxtBPw`Ud>%+FZ{-?-B^Gq-5b{Ke3t6fVxlb>tV$BfIJ{#K`x|D5gnF$1ycAO=W6f zn$C14QybHKrlm~RFs)(wFwY2tfO=6nL)WS5Asf}qV(;yabCF2^V8=2NJ-NdwxX+2XH( z{K+K+nz`g=ZAPwLn+k>Q)rCv6)Z855G2B3Rxs2Qui3ga%eNH|o{FqPuZTKM?E%<}j z!o1|>l44e9z)wf+qPe+6#J~mr3ke|%e`5;Ii_ICDmWi9LjEoWU=8ae;{3SdO*D!qs zEkl21g1z{i+^fBX5x)r0y=zbfJbdnDeCTZ>3xtf9eDBMCxb#=XllqX<%TV#9-hnca z_GJ7!nwt?zx-yy4a|V7RAk%sHU*?w{GNIovj1I{T{q$ZFvMFs_G>bMCp)bNe^+eT` zi(E<6W9+V`|f{W)57xw9UQNN4(>jw-R6gT*iA@M^m zO&FFqe1ze$%SVp7V)T_sW5y;!$9|P5doCti^KQa@GIo^#7j@|NpZ5&EqGercIn=u}+?no^kcm%xTkS%)|+*S+lQ` z^8ai8|0Cod#4Ug!UiK472S`^E;}8io6Jw|a#bsek;~~1T7?YlrE*oRgsnV6tm~^Xj z*%_1WlCDz5q}!ycj4|mV>8fN*x=6Ze7?Tc@u8oYlN~qN`R`WKS7{@Tbjxp(E>8fYk zT|&*pn8t>5H8Sogq1MD$t%Eqsm~^moH8Un1G+l1S7fPtLGFIzg+8C>KAlguwAEf)G zE1WUrU&Ix|_+rMfjMX|IJ!3ud;~5WNY+yW)aT4P}j7^N=7+V+*W}L|w%Mrwt#dru~ z8)MQz)0NMdbjozu87D}nl`zQ_$1_&?b_3(#uqbo zG49W}iLst>Gvfh_TNw{ztPPX$8N@h-aU5ekw@D<0}}u8INY%#`sFc;fXT+^sPI(Vi}KN9M5Yqu zhjA<8WX9TMGQC0E!HZ!W%2?01Gh+ke2*xJHk&H7LM=`cB?!(y5IGJ%J;~?%ZZe$$6 zcoXAD#`TP&7&kKR!}u`cWX7$GgH(s|a+%%;#xaZ|8S5GMVQgTW%-F&>NKdr~{eQ+0 zdI7b3#*vImm3zjO%6*))zfrkoyh*udT(8_SZdC5$rTfFmJ!7|W&$vz5A13XGkL2_- zj%6IlIG%BkLE2AJ_8D80ea2bJ{wQfbU)g6|s_ZkaRQ8jk{f&yxc$4Cfll*$cXWXdx zCdogn_>A2Or%HaC!WN0cN6GX@rb`^FaHhoZj6;`8oW!^fV+&)c)zzToB(1Fm-E6j? 
zYW;_WHH|r}gCs36T{O=}7tQubO4?$=Qdddq9O%ju+Ek1m*uKFFYkF10@uq7&QbJb= zyCZEcUB%vX$?yt<_L|oBNSw#%kf&o{$@iGbS-3eG#gJ>KD(#& zCUh-i`wQ8>V$5>WRl@!{ID9+%b2EoSv%++dW}B{sLVHi^kd#~`e-WGC%;_U}lf-B~ zoNUtEwUik7Li4>e=RN~^eR)wiNz}$`Y8*u6B~eTB>Z7Tg(7GWtE}`<02(7;w2T{2p zoW}mi+2TpwbwyNe5-}T~##K~)2v2AGRE`qCl3y5tkR0E8@@aVw$bDVm?G( zRIU;+8=>4UM#(3v#$i;>5~0agd@660UKKu-yF?&mpUNL$6+V?iijT5S<&os4(qF9I z0&MpBUkqszU&({YX(D3F)f}Ode>1)QQMsk~DgUVaQu?@=Kx`TzO+_`@=t|N^qI{c>t zIk@~${qQe0R8JD&N~MeJC889ma#t$k;={KJ_dZPZ%iliLvqY3ql`gsZWuiCVsNPXJ zRJ<37bY^(dN$Vi|%Y|GImf@8n)l*szr^-3iSHh}ZP`xFr!l(L6`4T7xs?YxAXRb)M zFJ4s7{o_gX-5*oE?}%mnQ0by}z@NWJnhkmhF*Mo5tbG_GE&$ zzR3A8U;OBOM4@G|ec7Ij_vQ<|f2h4u^-Z=9KK^2ne*gA?>}UFw6O!Kym~<@PvR?bz zG5>lnLA15elCCyv@Kl+Z#k6VWP15BoD9xgYPYGJoj)9E0aOW<;mq`w9~q9X zKFM%Wz3KP&w`0C85-mZ;_)kSml9%*%vL7ZnT;oj-rJqbx$&m4y?d41N6TIz+lCyfR zl6%UDj_y?}Lj5b^j`!xPlAmgCWq6al>6G?oddrctZ}#S^w4d%5AM*b?@A_tGU)45M z-=_HS$$qA{9Z~5~{Rrv5*)RPhomt*`A^GD2+-C;xr+LeV^k21_vfoXmO})1z-{Mye zD7>rv-ur|zz3KP$_vM?O$^~6YZ>-vTrKcYzi-A@%UXDX#Z(Gw0JP*Qbt(N(R*`SW` zr;J^UUt-+ExR!A<;|+{k8Gpc7n;^sgh;a<#4;kwjKf~C-_;to6#yc5jGX9*gjj@_1 zuruDr{4&PxFs@oOgBUk6U-kc58Lwcz zmMZ15m9dH4t9im$=Bs*>#r#mVAJ2T%uSsHj2lH)gKa8=3`QG-K`D$Jwi*UB*d_UuA4z{4wJ!#!Za#8LNJBDPtG&D;XbTT*m2( zV7!s}2N-W+`~~BB#-A{5WW1O0VaBg9b~A2Z+{XA2y-BGr2wvV%*65T*hi%Q_YJW zW_|(l^V$6+jNQ!7SME7~A{nz6;}6=HJUWp7E26lNi6q z*uq%7*RvRJVSYa2ZOT8cZ))DPl=+p+SM#D*GOlF4nupxT@r`2sM&_&WMIEPi1oJmB z-@&+z-N!PnXZ~i!jf~Z}td{LZGygF2r!p?(@ZuS}nZJOsf%*Lzw=qAPv09hVm2tR5 z<`1jkv^ox7$NX64-^Vzf@ifNuY(Iu^67$t~Tg{6nGT*{{`L@R9N{;Vk%+F$eDPuJc zJ(h7k^H(!=#mo42V{G93zKU@v+pl6=$#@3iMh^dS#v7TxjIoXRgBfpP{>_ZDm>=e+}bi=J#S;&-`l{H!|MH_%P%3jNObMW!%Pilj5`gy%~p3mhsJE z+{*k57{@YSY7;Pz%lS2q`SHwO%Q%Vg(~K>QmoUy^{4ir}s0{x?#`(;@Ub$!fK*pua zpU1e8@!O0yGTy;>6XQC@^^EHoH!^;Y@nOch8HaQDeHgo$e*G2Y1h zGRE4aGX7&2Z(@Eq<5+fo5#xI1-^93)@nXh$w%?EOVdl?a+|2wd7`vIjoN*HK^^Dt? 
z&rk-~bb{0VAhs9r8vwP-c5uld)F*6+cL5!cb$`V3e1PU&`R6Dv;Eg>V#Cw5A$os(Q zUtmsyoB{TBi2YvyUIPvcV2fx5mzY@o_5j@~) zqzQcv_%px^8Q-U5Lyw65(jKZw0HtpSe+Lx6hW7m|`9>UrIpEI$3-q-Aap0ds-?iWg zfD3W;;Ke`(l@q)Hpf!9Q*bculykD6(`Z>0TspteP@Zj<8T7^ zV#M_Q29k;Cy9p!{({~KgkS{Uq@wY)HrhW7=NS~PYt~((U)4p|zOH6y@$%dHr$CI9z z_WhAeyik&fXiego#GRE7lhqi*t(ei!H@<#r9%H zv9s7!%t{m`suFdHwnSH=FEN(nl;oFKO6p4NC5{qjiK~Q_DoRzQ>QZf~u2f%YEX^s+ zFSV4`mD)=krOr}UDJxTysmj!4+A>|4zRXybQ-XVht+9ySy_dm zLRF!z&{pUw^cBX6oQnJkOGRCUy~0u9tZ-GZN=2m#S9z>APzMEtmO^V`L!qOvv9QkV zVQluTk(e2L8swpi_|gDx z8sSe9JnDc?a(JbLUs3SPSe;#+Q=M0xUtLgbskTZWQ}bw@R; zk=H0{lr^fFs2X)mOpUfCzD8G*T%)f^t1;GO*W}dX)#TR{)L3e)HFY)hHTD`uO=FF- zrm4nN(^11}<+X}hWv!|(3wuhrEi*XnE2YK^towK=tUwfVILwU%0IZC!1B zt-ZFP)=}G7>#S|6b=A^3hbeH)qF!#1TND%G3j!JozvMQ=7rYgQFxhkzHyDG1$pvqcRU)506Sk+Y3Q6;x2ZBe!u zTf8mVmS)Sg<=F~sR$INT!PaPNvUS+x)ynFq>X_>I>g4J))KVU5$cox&K+QCvR^+IW zDAYzgY9bA_kcS$u!utmJ-UPqp@Hq3Y%_?ZVEt?;h_zBP${q0!+Qlf$PdON=Gn zl59z{WLxqq1s1EN-qK)cv@}^dEb>BSVN_vEVSHh7VOn8!VP0VY`bj-{N@HPDVMn37 zNLdtx@e^N^T$EOnjnPwp(Nka4P}GRe+a%oy=c4=N|L8-O0 zzOt4F_UM4#&@lcTpqmB*CFmnWB}m1mdd zl^2v-%j?S<${Wj@$~(&CR;4w{iuP4w9fA7zS02miN^PaCQeSDT%&E+;v{cqr+AAHE z&PrD$t5Q^{s?=55DqWSn%2<_Cm0xA4s;jbBIjWpht}13z*i<&PO>5KH^fsd{$Chui z*y?O{o5SX`xooUjQLU<0S8JUnBQFP|&VmtV$7pk6q@n(`xX*KsGX=(&+M>1SEP9L4l4HrY zSS)oGyTxH~T3i-Zs3=qwstdJ+xOVt$OOz=?dU?LjK^pgi=R2J{>y=BF6Uz1f&|EmkY$ k)q2dV4VYUSF}pTdJ21cgnufYUhgraa+4SGxU#ABC18xy{IsgCw literal 0 HcmV?d00001 diff --git a/setup.py b/setup.py index 30882e4..9151057 100644 --- a/setup.py +++ b/setup.py @@ -6,6 +6,7 @@ import platform if os.path.isfile("./skip_extensions"): BUILD_EXTENSIONS = False else: BUILD_EXTENSIONS = True +if os.name == "nt": BUILD_EXTENSIONS = False if BUILD_EXTENSIONS: print(f"Building LXST with native extensions...") else: print(f"Building LXST without native extensions...") @@ -14,7 +15,6 @@ with open("README.md", "r") as fh: long_description = fh.read() exec(open("LXST/_version.py", "r").read()) c_sources = ["LXST/Filters.c"] -if os.name == "nt": c_sources.append("LXST/Platforms/windows.c") if BUILD_EXTENSIONS: extensions = [ Extension("LXST.filterlib", sources=c_sources, include_dirs=["LXST"], language="c"), ] else: extensions = [] @@ -35,7 +35,10 @@ package_data = { "Filters.h", "Filters.c", "filterlib*.so", - "filterlib*.pyd", + "filterlib*.dll", + "Platforms/linux/pulseaudio.h", + "Platforms/darwin/coreaudio.h", + "Platforms/windows/mediafoundation.h", ] } @@ -64,10 +67,9 @@ setuptools.setup( }, install_requires=["rns>=1.0.4", "lxmf>=0.9.3", - "soundcard>=0.4.5", "numpy>=2.3.4", "pycodec2>=4.1.0", "audioop-lts>=0.2.1;python_version>='3.13'", - "cffi>=1.17.1"], + "cffi>=2.0.0"], python_requires=">=3.11", )