"""
Audio stream manager for live_avatar mode.

Mirrors the JS SDK's AudioStreamManager — ensures WebRTC always has
audio frames to send even when no user mic/audio is provided.
"""

import asyncio
import fractions
import io
import logging
from collections import deque
from pathlib import Path
from typing import Optional, Union

import av
from aiortc import MediaStreamTrack

logger = logging.getLogger(__name__)

SAMPLE_RATE = 48000
SAMPLES_PER_FRAME = 960  # 20ms at 48kHz
BYTES_PER_SAMPLE = 2  # s16 format
BYTES_PER_FRAME = SAMPLES_PER_FRAME * BYTES_PER_SAMPLE
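# One recv() frame covers 20 ms of audio: 48000 Hz * 0.02 s = 960 samples,
# and 960 samples * 2 bytes (s16 mono) = 1920 bytes of raw PCM.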


def _make_silence_frame() -> av.AudioFrame:
    frame = av.AudioFrame(samples=SAMPLES_PER_FRAME, layout="mono", format="s16")
    for plane in frame.planes:
        plane.update(bytes(BYTES_PER_FRAME))
    return frame


class _AudioTrack(MediaStreamTrack):
    kind = "audio"

    def __init__(self) -> None:
        super().__init__()
        self._queue: deque[av.AudioFrame] = deque()
        self._pts = 0
        self._start: Optional[float] = None
        self._done_event: Optional[asyncio.Event] = None

    async def recv(self) -> av.AudioFrame:
        if self._start is None:
            self._start = asyncio.get_running_loop().time()

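        # Pace delivery in real time: frame N is due at start + N * (960 / 48000) s,
        # i.e. one frame every 20 ms, so sleep until its scheduled time.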
        target = self._start + (self._pts / SAMPLE_RATE)
        delay = target - asyncio.get_running_loop().time()
        if delay > 0:
            await asyncio.sleep(delay)

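        # Serve queued playback audio when available; otherwise keep the
        # track alive with a silent frame.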
        if self._queue:
            frame = self._queue.popleft()
            if not self._queue and self._done_event:
                self._done_event.set()
                self._done_event = None
        else:
            frame = _make_silence_frame()

        frame.pts = self._pts
        frame.sample_rate = SAMPLE_RATE
        frame.time_base = fractions.Fraction(1, SAMPLE_RATE)
        self._pts += SAMPLES_PER_FRAME

        return frame

    def enqueue(self, frames: list[av.AudioFrame], done: asyncio.Event) -> None:
        self._queue.extend(frames)
        self._done_event = done

    def clear(self) -> None:
        self._queue.clear()
        if self._done_event:
            self._done_event.set()
            self._done_event = None


class AudioStreamManager:
    """Manages audio for live_avatar mode.

    Provides a continuous audio track that outputs silence by default
    and allows playing audio data through it via play_audio().
    """

    def __init__(self) -> None:
        self._track = _AudioTrack()
        self._playing = False

    def get_track(self) -> MediaStreamTrack:
        return self._track

    @property
    def is_playing(self) -> bool:
        return self._playing

    async def play_audio(self, audio: Union[bytes, str, Path]) -> None:
        """Play audio through the stream. Returns once the audio has finished playing.

        Args:
            audio: Audio data as bytes, file path string, or Path object.
        """
        if self._playing:
            self.stop_audio()

        if isinstance(audio, bytes):
            container: av.InputContainer = av.open(io.BytesIO(audio))  # type: ignore[assignment]
        else:
            container = av.open(str(audio))  # type: ignore[assignment]

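        # Decode the input with PyAV and resample everything to 48 kHz mono s16,
        # accumulating the raw PCM bytes.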
        try:
            resampler = av.AudioResampler(format="s16", layout="mono", rate=SAMPLE_RATE)
            raw = bytearray()

            for frame in container.decode(audio=0):
                for resampled in resampler.resample(frame):
                    raw.extend(bytes(resampled.planes[0]))

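            # Flush any samples still buffered inside the resampler.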
            for resampled in resampler.resample(None):
                raw.extend(bytes(resampled.planes[0]))
        finally:
            container.close()

        if not raw:
            return

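        # Slice the PCM into 20 ms frames, zero-padding the final partial chunk.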
        frames = []
        for i in range(0, len(raw), BYTES_PER_FRAME):
            chunk = raw[i : i + BYTES_PER_FRAME]
            if len(chunk) < BYTES_PER_FRAME:
                chunk.extend(bytes(BYTES_PER_FRAME - len(chunk)))

            frame = av.AudioFrame(samples=SAMPLES_PER_FRAME, layout="mono", format="s16")
            frame.planes[0].update(bytes(chunk))
            frames.append(frame)

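        # Hand the frames to the track and wait until it has pulled the last one.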
        done = asyncio.Event()
        self._playing = True
        self._track.enqueue(frames, done)

        await done.wait()
        self._playing = False

    def stop_audio(self) -> None:
        self._track.clear()
        self._playing = False

    def cleanup(self) -> None:
        self.stop_audio()
        self._track.stop()
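

# Usage sketch (illustrative only): assumes an aiortc RTCPeerConnection named
# `pc` created elsewhere and a local "reply.wav" file; neither is part of this
# module. Silence flows on the track until play_audio() is awaited.
#
#     manager = AudioStreamManager()
#     pc.addTrack(manager.get_track())
#     await manager.play_audio("reply.wav")  # returns when playback finishes
#     manager.cleanup()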