refactor audio calls into manager class and rewrite user interface

liamcottle
2024-05-20 23:18:17 +12:00
parent 86d9acb4ec
commit 9dbb1ff9e7
3 changed files with 599 additions and 307 deletions


@@ -1,73 +1,262 @@
<html>
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
<title>Phone | Reticulum WebChat</title>
<!-- scripts -->
<script src="assets/js/tailwindcss/tailwind-v3.4.3-forms-v0.5.7.js"></script>
<script src="assets/js/vue@3.4.26/dist/vue.global.js"></script>
<!-- codec2 -->
<script src="assets/js/codec2-emscripten/c2enc.js"></script>
<script src="assets/js/codec2-emscripten/c2dec.js"></script>
<script src="assets/js/codec2-emscripten/sox.js"></script>
<script src="assets/js/codec2-emscripten/codec2-lib.js"></script>
</head>
<body class="bg-gray-100">
<div id="app" class="flex h-full">
<div class="mx-auto my-auto w-full max-w-md">
<!-- in active call -->
<div v-if="isWebsocketConnected" class="w-full">
<div class="border rounded-xl bg-white shadow w-full">
<div class="flex border-b border-gray-300 text-gray-700 p-2">
<div class="my-auto mr-2">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-6 h-6">
<path stroke-linecap="round" stroke-linejoin="round" d="M2.25 6.75c0 8.284 6.716 15 15 15h2.25a2.25 2.25 0 0 0 2.25-2.25v-1.372c0-.516-.351-.966-.852-1.091l-4.423-1.106c-.44-.11-.902.055-1.173.417l-.97 1.293c-.282.376-.769.542-1.21.38a12.035 12.035 0 0 1-7.143-7.143c-.162-.441.004-.928.38-1.21l1.293-.97c.363-.271.527-.734.417-1.173L6.963 3.102a1.125 1.125 0 0 0-1.091-.852H4.5A2.25 2.25 0 0 0 2.25 4.5v2.25Z" />
</svg>
</div>
<div class="my-auto">Reticulum Phone</div>
</div>
<div class="border-b border-gray-300 text-gray-700 p-2">
<div class="mb-2">
<div class="mb-1 text-sm font-medium text-gray-900">Call Hash</div>
<div class="text-xs text-gray-600">{{ callHash }}</div>
</div>
<div class="mb-2">
<div class="mb-1 text-sm font-medium text-gray-900">TX Bytes</div>
<div class="text-xs text-gray-600">{{ formatBytes(txBytes) }}</div>
</div>
<div class="mb-2">
<div class="mb-1 text-sm font-medium text-gray-900">RX Bytes</div>
<div class="text-xs text-gray-600">{{ formatBytes(rxBytes) }}</div>
</div>
<div>
<div class="mb-1 text-sm font-medium text-gray-900">Codec2 Mode</div>
<select v-model="codecMode" class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5">
<option value="3200">3200</option>
<option value="2400">2400</option>
<option value="1600">1600</option>
<option value="1400">1400</option>
<option value="1300">1300</option>
<option value="1200" selected>1200</option>
<option value="1200">1200</option>
<option value="700C">700C</option>
<option value="450">450</option>
<option value="450PWB">450PWB</option>
</select>
</div>
</div>
<div class="flex text-gray-900 p-2">
<button @click="isMicMuted = !isMicMuted" type="button" :class="[ isMicMuted ? 'bg-red-500 hover:bg-red-400 focus-visible:outline-red-500' : 'bg-green-500 hover:bg-green-400 focus-visible:outline-green-500' ]" class="my-auto inline-flex items-center gap-x-1 rounded-md p-2 text-sm font-semibold text-white shadow-sm focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" class="w-5 h-5">
<path d="M7 4a3 3 0 0 1 6 0v6a3 3 0 1 1-6 0V4Z" />
<path d="M5.5 9.643a.75.75 0 0 0-1.5 0V10c0 3.06 2.29 5.585 5.25 5.954V17.5h-1.5a.75.75 0 0 0 0 1.5h4.5a.75.75 0 0 0 0-1.5h-1.5v-1.546A6.001 6.001 0 0 0 16 10v-.357a.75.75 0 0 0-1.5 0V10a4.5 4.5 0 0 1-9 0v-.357Z" />
</svg>
</button>
<button @click="leaveCall" type="button" class="ml-auto my-auto inline-flex items-center gap-x-1 rounded-md bg-red-500 p-2 text-sm font-semibold text-white shadow-sm hover:bg-red-400 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-red-500">
Leave Call
</button>
</div>
</div>
</div>
<!-- no call active -->
<div v-else class="w-full">
<div class="border rounded-xl bg-white shadow w-full">
<div class="flex border-b border-gray-300 text-gray-700 p-2">
<div class="my-auto mr-2">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-6 h-6">
<path stroke-linecap="round" stroke-linejoin="round" d="M2.25 6.75c0 8.284 6.716 15 15 15h2.25a2.25 2.25 0 0 0 2.25-2.25v-1.372c0-.516-.351-.966-.852-1.091l-4.423-1.106c-.44-.11-.902.055-1.173.417l-.97 1.293c-.282.376-.769.542-1.21.38a12.035 12.035 0 0 1-7.143-7.143c-.162-.441.004-.928.38-1.21l1.293-.97c.363-.271.527-.734.417-1.173L6.963 3.102a1.125 1.125 0 0 0-1.091-.852H4.5A2.25 2.25 0 0 0 2.25 4.5v2.25Z" />
</svg>
</div>
<div class="my-auto">Reticulum Phone</div>
</div>
<div class="flex text-gray-900 p-2 space-x-2">
<div class="flex-1">
<input v-model="callHash" type="text" placeholder="Enter Call Hash" class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2">
</div>
<button @click="joinCall" type="button" class="my-auto inline-flex items-center gap-x-1 rounded-md bg-gray-500 p-2 text-sm font-semibold text-white shadow-sm hover:bg-gray-400 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-gray-500">
Join Existing Call
</button>
</div>
</div>
</div>
</div>
<script src="assets/js/codec2-emscripten/c2enc.js"></script>
<script src="assets/js/codec2-emscripten/c2dec.js"></script>
<script src="assets/js/codec2-emscripten/sox.js"></script>
<script src="assets/js/codec2-emscripten/codec2-lib.js"></script>
</div>
<script>
Vue.createApp({
data() {
return {
isWebsocketConnected: false,
callHash: null,
txBytes: 0,
rxBytes: 0,
isMicMuted: true,
codecMode: "1200",
sampleRate: 8000,
audioContext: null,
mediaStreamSource: null,
audioWorkletNode: null,
microphoneMediaStream: null,
ws: null,
};
},
mounted: function() {
},
methods: {
async joinCall() {
// make sure call hash provided
if(!this.callHash){
alert("Enter hash of existing call.");
return;
}
// reset stats
this.txBytes = 0;
this.rxBytes = 0;
// connect to websocket
this.ws = new WebSocket(location.origin.replace(/^http/, 'ws') + `/api/v1/calls/${this.callHash}/audio`);
this.ws.addEventListener('open', async () => {
// we are now connected
this.isWebsocketConnected = true;
// send mic audio over call
await this.startRecordingMicrophone((encoded) => {
// do nothing if websocket closed
if(this.ws.readyState !== WebSocket.OPEN){
return;
}
// do nothing when audio muted
if(this.isMicMuted){
return;
}
// send encoded audio to websocket
this.ws.send(encoded);
// update stats
this.txBytes += encoded.length;
});
});
this.ws.addEventListener('close', () => {
this.isWebsocketConnected = false;
this.leaveCall();
});
this.ws.addEventListener('error', (error) => {
console.log(error);
});
// listen to audio from call
this.ws.onmessage = async (event) => {
// get encoded codec2 bytes from websocket message
const encoded = await event.data.arrayBuffer();
// update stats
this.rxBytes += encoded.byteLength;
// decode codec2 audio
const decoded = await Codec2Lib.runDecode(this.codecMode, encoded);
// convert decoded codec2 to wav audio
const wavAudio = await Codec2Lib.rawToWav(decoded);
// play wav audio buffer
let audioCtx = new AudioContext()
const audioBuffer = await audioCtx.decodeAudioData(wavAudio.buffer);
const sampleSource = audioCtx.createBufferSource();
sampleSource.buffer = audioBuffer;
sampleSource.connect(audioCtx.destination)
sampleSource.start(0);
};
},
leaveCall: function() {
// mute mic
this.isMicMuted = true;
// disconnect websocket
if(this.ws){
this.ws.close();
}
// disconnect media stream source
if(this.mediaStreamSource){
this.mediaStreamSource.disconnect();
}
// stop using microphone
if(this.microphoneMediaStream){
this.microphoneMediaStream.getTracks().forEach(track => track.stop());
}
// disconnect the audio worklet node
if(this.audioWorkletNode){
this.audioWorkletNode.disconnect();
}
// close audio context
if(this.audioContext && this.audioContext.state !== "closed"){
this.audioContext.close();
}
},
async startRecordingMicrophone(onAudioAvailable) {
try {
// load audio worklet module
this.audioContext = new AudioContext({ sampleRate: this.sampleRate });
await this.audioContext.audioWorklet.addModule('assets/js/codec2-emscripten/processor.js');
this.audioWorkletNode = new AudioWorkletNode(this.audioContext, 'audio-processor');
// handle audio received from audio worklet
this.audioWorkletNode.port.onmessage = async (event) => {
// convert audio received from worklet processor to wav
const buffer = this.encodeWAV(event.data, this.sampleRate);
// convert wav audio to codec2
const rawBuffer = await Codec2Lib.audioFileToRaw(buffer, "audio.wav");
const encoded = await Codec2Lib.runEncode(this.codecMode, rawBuffer);
// pass encoded audio to callback
onAudioAvailable(encoded);
@@ -75,178 +264,31 @@
};
// request access to the microphone
this.microphoneMediaStream = await navigator.mediaDevices.getUserMedia({
audio: true,
});
// send mic audio to audio worklet
this.mediaStreamSource = this.audioContext.createMediaStreamSource(this.microphoneMediaStream);
this.mediaStreamSource.connect(this.audioWorkletNode);
} catch(e) {
alert(e);
console.log(e);
}
},
encodeWAV: function(samples, sampleRate = 8000, numChannels = 1) {
const buffer = new ArrayBuffer(44 + samples.length * 2);
const view = new DataView(buffer);
// RIFF chunk descriptor
this.writeString(view, 0, 'RIFF');
view.setUint32(4, 36 + samples.length * 2, true); // file length
this.writeString(view, 8, 'WAVE');
// fmt sub-chunk
this.writeString(view, 12, 'fmt ');
view.setUint32(16, 16, true); // sub-chunk size
view.setUint16(20, 1, true); // audio format (1 = PCM)
view.setUint16(22, numChannels, true); // number of channels
@@ -256,30 +298,27 @@
view.setUint16(34, 16, true); // bits per sample
// data sub-chunk
this.writeString(view, 36, 'data');
view.setUint32(40, samples.length * 2, true); // data chunk length
// write the PCM samples
this.floatTo16BitPCM(view, 44, samples);
return buffer;
},
writeString: function(view, offset, string) {
for(let i = 0; i < string.length; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
},
floatTo16BitPCM: function(output, offset, input) {
for(let i = 0; i < input.length; i++, offset += 2){
const s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
},
formatBytes: function(bytes) {
if(bytes === 0){
return '0 Bytes';
@@ -293,8 +332,9 @@
return parseFloat((bytes / Math.pow(k, i)).toFixed(decimals)) + ' ' + sizes[i];
},
},
}).mount('#app');
</script>
</body>
</html>

src/audio_call_manager.py (new file)

@@ -0,0 +1,195 @@
import asyncio
from typing import List
import RNS
# todo optionally identify self over link
# todo allowlist/denylist for incoming calls
class AudioCall:
def __init__(self, link: RNS.Link, is_outbound: bool):
self.link = link
self.is_outbound = is_outbound
self.link.set_link_closed_callback(self.on_link_closed)
self.link.set_packet_callback(self.on_packet)
self.audio_packet_listeners = []
def register_audio_packet_listener(self, callback):
self.audio_packet_listeners.append(callback)
def unregister_audio_packet_listener(self, callback):
self.audio_packet_listeners.remove(callback)
# handle link being closed
def on_link_closed(self):
print("[AudioCall] on_link_closed")
self.hangup()
# handle packet received over link
def on_packet(self, message, packet):
# send audio received from call initiator to all audio packet listeners
for audio_packet_listener in self.audio_packet_listeners:
audio_packet_listener(message)
# send an audio packet over the link
def send_audio_packet(self, data):
# do nothing if link is not active
if self.is_active() is False:
return
# drop audio packet if it is too big to send
if len(data) > RNS.Link.MDU:
print("[AudioCall] dropping audio packet " + str(len(data)) + " bytes exceeds the link packet MDU of " + str(RNS.Link.MDU) + " bytes")
return
# send codec2 audio received from call receiver to call initiator over reticulum link
RNS.Packet(self.link, data).send()
# gets the identity of the caller, or returns None if they did not identify
def initiator_identity(self):
return self.link.get_remote_identity()
# determine if this call is still active
def is_active(self):
return self.link.status == RNS.Link.ACTIVE
# handle hanging up the call
def hangup(self):
print("[AudioCall] hangup")
self.link.teardown()
class AudioCallManager:
def __init__(self, identity: RNS.Identity):
self.identity = identity
self.on_incoming_call_callback = None
self.on_outgoing_call_callback = None
self.audio_call_receiver = AudioCallReceiver(manager=self)
# remember audio calls
self.audio_calls: List[AudioCall] = []
# announces the audio call destination
def announce(self, app_data=None):
self.audio_call_receiver.destination.announce(app_data)
print("[AudioCallManager] announced destination: " + RNS.prettyhexrep(self.audio_call_receiver.destination.hash))
# set the callback for incoming calls
def register_incoming_call_callback(self, callback):
self.on_incoming_call_callback = callback
# set the callback for outgoing calls
def register_outgoing_call_callback(self, callback):
self.on_outgoing_call_callback = callback
# handle incoming calls from audio call receiver
def handle_incoming_call(self, audio_call: AudioCall):
# remember it
self.audio_calls.append(audio_call)
# fire callback
if self.on_incoming_call_callback is not None:
self.on_incoming_call_callback(audio_call)
# handle outgoing calls
def handle_outgoing_call(self, audio_call: AudioCall):
# remember it
self.audio_calls.append(audio_call)
# fire callback
if self.on_outgoing_call_callback is not None:
self.on_outgoing_call_callback(audio_call)
# find an existing audio call from the provided link hash
def find_audio_call_by_link_hash(self, link_hash: bytes):
for audio_call in self.audio_calls:
if audio_call.link.hash == link_hash:
return audio_call
return None
# attempts to initiate a call to the provided destination and returns the link hash on success
# FIXME: implement timeout. at the moment, it loops forever if no path is found
async def initiate(self, destination_hash: bytes) -> bytes:
# wait until we have a path to the destination
if not RNS.Transport.has_path(destination_hash):
RNS.Transport.request_path(destination_hash)
while not RNS.Transport.has_path(destination_hash):
await asyncio.sleep(0.1)
# create outbound destination to initiate audio calls
server_identity = RNS.Identity.recall(destination_hash)
server_destination = RNS.Destination(
server_identity,
RNS.Destination.OUT,
RNS.Destination.SINGLE,
"call",
"audio"
)
# create link
link = RNS.Link(server_destination)
# register link state callbacks
link.set_link_established_callback(self.on_link_established)
return link.hash
def on_link_established(self, link: RNS.Link):
# todo: this can be optional, it's only being sent by default for ui, can be removed
link.identify(self.identity)
# create audio call
audio_call = AudioCall(link, is_outbound=True)
# handle new outgoing call
self.handle_outgoing_call(audio_call)
class AudioCallReceiver:
def __init__(self, manager: AudioCallManager):
self.manager = manager
# create destination for receiver audio calls
self.destination = RNS.Destination(
self.manager.identity,
RNS.Destination.IN,
RNS.Destination.SINGLE,
"call",
"audio",
)
# register link state callbacks
self.destination.set_link_established_callback(self.client_connected)
# find an existing audio call from the provided link
def find_audio_call_by_link_hash(self, link_hash: bytes):
for audio_call in self.manager.audio_calls:
if audio_call.link.hash == link_hash:
return audio_call
return None
# client connected to us, set up an audio call instance
def client_connected(self, link: RNS.Link):
# todo: this can be optional, it's only being sent by default for ui, can be removed
link.identify(self.manager.identity)
# create audio call
audio_call = AudioCall(link, is_outbound=False)
# pass to manager
self.manager.handle_incoming_call(audio_call)
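
The FIXME in AudioCallManager.initiate() could be addressed by putting a deadline on the path wait. Below is a minimal sketch of one option, assuming asyncio and the RNS.Transport calls used above; the wait_for_path helper and its timeout parameter are hypothetical and not part of this commit:

import asyncio
import RNS

async def wait_for_path(destination_hash: bytes, timeout: float = 15.0):
    # hypothetical helper, not part of this commit
    # ask the network for a path if we do not already have one
    if not RNS.Transport.has_path(destination_hash):
        RNS.Transport.request_path(destination_hash)

    async def poll():
        # poll until Transport learns a path to the destination
        while not RNS.Transport.has_path(destination_hash):
            await asyncio.sleep(0.1)

    # raises asyncio.TimeoutError if no path appears within the deadline,
    # instead of looping forever
    await asyncio.wait_for(poll(), timeout=timeout)

initiate() could then await wait_for_path(destination_hash) and surface the timeout to the caller as an error, rather than blocking indefinitely.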

web.py

@@ -18,6 +18,7 @@ from peewee import SqliteDatabase
import database
from lxmf_message_fields import LxmfImageField, LxmfFileAttachmentsField, LxmfFileAttachment
from src.audio_call_manager import AudioCall, AudioCallManager
class ReticulumWebChat:
@@ -84,7 +85,13 @@ class ReticulumWebChat:
# remember websocket clients
self.websocket_clients: List[web.WebSocketResponse] = []
# register audio call identity
self.audio_call_manager = AudioCallManager(identity=self.identity)
self.audio_call_manager.register_incoming_call_callback(self.on_incoming_audio_call)
# handle receiving a new audio call
def on_incoming_audio_call(self, audio_call: AudioCall):
print("on_incoming_audio_call: {}".format(audio_call.link.hash.hex()))
# web server has shut down, likely via ctrl+c; without the following, the script never exits
async def shutdown(self, app):
@@ -232,69 +239,89 @@ class ReticulumWebChat:
return websocket_response
# get calls
@routes.get("/api/v1/calls")
async def index(request):
# get audio calls
audio_calls = []
for audio_call in self.audio_call_manager.audio_calls:
# get initiator identity hash
initiator_identity_hash = None
initiator_identity = audio_call.initiator_identity()
if initiator_identity is not None:
initiator_identity_hash = initiator_identity.hash.hex()
audio_calls.append({
"hash": audio_call.link.hash.hex(),
"initiator_identity_hash": initiator_identity_hash,
"is_active": audio_call.is_active(),
"is_outbound": audio_call.is_outbound,
})
return web.json_response({
"audio_calls": audio_calls,
})
# initiate a call to the provided destination
@routes.get("/api/v1/calls/initiate/{destination_hash}")
async def index(request):
# get path params
destination_hash = request.match_info.get("destination_hash", "")
# convert destination hash to bytes
destination_hash = bytes.fromhex(destination_hash)
# initiate audio call
link_hash = await self.audio_call_manager.initiate(destination_hash)
return web.json_response({
"hash": link_hash.hex(),
})
# handle websocket client for sending and receiving audio packets in a call
@routes.get("/api/v1/calls/{audio_call_link_hash}/audio")
async def ws(request):
# get path params
audio_call_link_hash = request.match_info.get("audio_call_link_hash", "")
# convert hash to bytes
audio_call_link_hash = bytes.fromhex(audio_call_link_hash)
# find audio call
audio_call = self.audio_call_manager.find_audio_call_by_link_hash(audio_call_link_hash)
if audio_call is None:
# fixme: web browser expects websocket, so this won't be useful
return web.json_response({
"message": "audio call not found",
}, status=404)
# send audio received from call initiator to call receiver websocket
def on_audio_packet(data):
if websocket_response.closed is False:
try:
asyncio.run(websocket_response.send_bytes(data))
except:
# ignore errors sending audio packets to websocket
pass
# register audio packet listener
audio_call.register_audio_packet_listener(on_audio_packet)
# prepare websocket response
websocket_response = web.WebSocketResponse()
await websocket_response.prepare(request)
# handle websocket messages until disconnected
# FIXME: we should send a type with the message, so we can send other data as well
async for msg in websocket_response:
msg: WSMessage = msg
if msg.type == WSMsgType.BINARY:
try:
audio_call.send_audio_packet(msg.data)
except Exception as e:
# ignore errors while handling message
print("failed to process client message")
@@ -303,8 +330,35 @@ class ReticulumWebChat:
# ignore errors while handling message
print('ws connection error %s' % websocket_response.exception())
# unregister audio packet handler now that the websocket has been closed
audio_call.unregister_audio_packet_listener(on_audio_packet)
return websocket_response
# hangup calls
@routes.get("/api/v1/calls/{audio_call_link_hash}/hangup")
async def index(request):
# get path params
audio_call_link_hash = request.match_info.get("audio_call_link_hash", "")
# convert hash to bytes
audio_call_link_hash = bytes.fromhex(audio_call_link_hash)
# find audio call
audio_call = self.audio_call_manager.find_audio_call_by_link_hash(audio_call_link_hash)
if audio_call is None:
return web.json_response({
"message": "audio call not found",
}, status=404)
# hangup the call
audio_call.hangup()
return web.json_response({
"message": "call has been hungup",
})
# serve announces
@routes.get("/api/v1/announces")
async def index(request):
@@ -575,6 +629,9 @@ class ReticulumWebChat:
# send announce for lxmf
self.local_lxmf_destination.announce(app_data=self.config.display_name.get().encode("utf-8"))
# send announce for audio call
self.audio_call_manager.announce(app_data=self.config.display_name.get().encode("utf-8"))
# handle downloading a file from a nomadnet node
elif _type == "nomadnet.file.download":