LiveKit

class pipecat.transports.services.livekit.LiveKitTransportMessageFrame(message: Any, participant_id: str | None = None)[source]

Bases: TransportMessageFrame

Parameters:
  • message (Any)

  • participant_id (str | None)

participant_id: str | None = None
class pipecat.transports.services.livekit.LiveKitTransportMessageUrgentFrame(message: Any, participant_id: str | None = None)[source]

Bases: TransportMessageUrgentFrame

Parameters:
  • message (Any)

  • participant_id (str | None)

participant_id: str | None = None
class pipecat.transports.services.livekit.LiveKitParams(*, camera_in_enabled=False, camera_out_enabled=False, camera_out_is_live=False, camera_out_width=1024, camera_out_height=768, camera_out_bitrate=800000, camera_out_framerate=30, camera_out_color_format='RGB', audio_out_enabled=False, audio_out_sample_rate=None, audio_out_channels=1, audio_out_bitrate=96000, audio_out_10ms_chunks=4, audio_out_mixer=None, audio_out_destinations=<factory>, audio_in_enabled=False, audio_in_sample_rate=None, audio_in_channels=1, audio_in_filter=None, audio_in_stream_on_start=True, audio_in_passthrough=True, video_in_enabled=False, video_out_enabled=False, video_out_is_live=False, video_out_width=1024, video_out_height=768, video_out_bitrate=800000, video_out_framerate=30, video_out_color_format='RGB', video_out_destinations=<factory>, vad_enabled=False, vad_audio_passthrough=False, vad_analyzer=None, turn_analyzer=None)[source]

Bases: TransportParams

Parameters:
  • camera_in_enabled (bool)

  • camera_out_enabled (bool)

  • camera_out_is_live (bool)

  • camera_out_width (int)

  • camera_out_height (int)

  • camera_out_bitrate (int)

  • camera_out_framerate (int)

  • camera_out_color_format (str)

  • audio_out_enabled (bool)

  • audio_out_sample_rate (int | None)

  • audio_out_channels (int)

  • audio_out_bitrate (int)

  • audio_out_10ms_chunks (int)

  • audio_out_mixer (BaseAudioMixer | Mapping[str | None, BaseAudioMixer] | None)

  • audio_out_destinations (List[str])

  • audio_in_enabled (bool)

  • audio_in_sample_rate (int | None)

  • audio_in_channels (int)

  • audio_in_filter (BaseAudioFilter | None)

  • audio_in_stream_on_start (bool)

  • audio_in_passthrough (bool)

  • video_in_enabled (bool)

  • video_out_enabled (bool)

  • video_out_is_live (bool)

  • video_out_width (int)

  • video_out_height (int)

  • video_out_bitrate (int)

  • video_out_framerate (int)

  • video_out_color_format (str)

  • video_out_destinations (List[str])

  • vad_enabled (bool)

  • vad_audio_passthrough (bool)

  • vad_analyzer (VADAnalyzer | None)

  • turn_analyzer (BaseTurnAnalyzer | None)

model_config: ClassVar[ConfigDict] = {'arbitrary_types_allowed': True}

Configuration for the model, should be a dictionary conforming to ConfigDict (pydantic.config.ConfigDict).

class pipecat.transports.services.livekit.LiveKitCallbacks(*, on_connected, on_disconnected, on_participant_connected, on_participant_disconnected, on_audio_track_subscribed, on_audio_track_unsubscribed, on_data_received, on_first_participant_joined)[source]

Bases: BaseModel

Parameters:
  • on_connected (Callable[[], Awaitable[None]])

  • on_disconnected (Callable[[], Awaitable[None]])

  • on_participant_connected (Callable[[str], Awaitable[None]])

  • on_participant_disconnected (Callable[[str], Awaitable[None]])

  • on_audio_track_subscribed (Callable[[str], Awaitable[None]])

  • on_audio_track_unsubscribed (Callable[[str], Awaitable[None]])

  • on_data_received (Callable[[bytes, str], Awaitable[None]])

  • on_first_participant_joined (Callable[[str], Awaitable[None]])

on_connected: Callable[[], Awaitable[None]]
on_disconnected: Callable[[], Awaitable[None]]
on_participant_connected: Callable[[str], Awaitable[None]]
on_participant_disconnected: Callable[[str], Awaitable[None]]
on_audio_track_subscribed: Callable[[str], Awaitable[None]]
on_audio_track_unsubscribed: Callable[[str], Awaitable[None]]
on_data_received: Callable[[bytes, str], Awaitable[None]]
on_first_participant_joined: Callable[[str], Awaitable[None]]
model_config: ClassVar[ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to ConfigDict (pydantic.config.ConfigDict).

class pipecat.transports.services.livekit.LiveKitTransportClient(url, token, room_name, params, callbacks, transport_name)[source]

Bases: object

Parameters:
  • url (str)

  • token (str)

  • room_name (str)

  • params (LiveKitParams)

  • callbacks (LiveKitCallbacks)

  • transport_name (str)

property participant_id: str
property room: livekit.rtc.Room
async setup(setup)[source]
Parameters:

setup (FrameProcessorSetup)

async cleanup()[source]
async start(frame)[source]
Parameters:

frame (StartFrame)

async connect()[source]
async disconnect()[source]
async send_data(data, participant_id=None)[source]
Parameters:
  • data (bytes)

  • participant_id (str | None)

async publish_audio(audio_frame)[source]
Parameters:

audio_frame (livekit.rtc.AudioFrame)

get_participants()[source]
Return type:

List[str]

async get_participant_metadata(participant_id)[source]
Parameters:

participant_id (str)

Return type:

dict

async set_participant_metadata(metadata)[source]
Parameters:

metadata (str)

async mute_participant(participant_id)[source]
Parameters:

participant_id (str)

async unmute_participant(participant_id)[source]
Parameters:

participant_id (str)

async get_next_audio_frame()[source]
class pipecat.transports.services.livekit.LiveKitInputTransport(transport, client, params, **kwargs)[source]

Bases: BaseInputTransport

Parameters:
  • transport (BaseTransport)

  • client (LiveKitTransportClient)

  • params (LiveKitParams)

property vad_analyzer: VADAnalyzer | None
async start(frame)[source]
Parameters:

frame (StartFrame)

async stop(frame)[source]
Parameters:

frame (EndFrame)

async cancel(frame)[source]
Parameters:

frame (CancelFrame)

async setup(setup)[source]
Parameters:

setup (FrameProcessorSetup)

async cleanup()[source]
async push_app_message(message, sender)[source]
Parameters:
  • message (Any)

  • sender (str)

class pipecat.transports.services.livekit.LiveKitOutputTransport(transport, client, params, **kwargs)[source]

Bases: BaseOutputTransport

Parameters:
  • transport (BaseTransport)

  • client (LiveKitTransportClient)

  • params (LiveKitParams)

async start(frame)[source]
Parameters:

frame (StartFrame)

async stop(frame)[source]
Parameters:

frame (EndFrame)

async cancel(frame)[source]
Parameters:

frame (CancelFrame)

async setup(setup)[source]
Parameters:

setup (FrameProcessorSetup)

async cleanup()[source]
async send_message(frame)[source]
Parameters:

frame (TransportMessageFrame | TransportMessageUrgentFrame)

async write_audio_frame(frame)[source]
Parameters:

frame (OutputAudioRawFrame)

class pipecat.transports.services.livekit.LiveKitTransport(url, token, room_name, params=None, input_name=None, output_name=None)[source]

Bases: BaseTransport

Parameters:
  • url (str)

  • token (str)

  • room_name (str)

  • params (LiveKitParams | None)

  • input_name (str | None)

  • output_name (str | None)

input()[source]
Return type:

LiveKitInputTransport

output()[source]
Return type:

LiveKitOutputTransport

property participant_id: str
async send_audio(frame)[source]
Parameters:

frame (OutputAudioRawFrame)

get_participants()[source]
Return type:

List[str]

async get_participant_metadata(participant_id)[source]
Parameters:

participant_id (str)

Return type:

dict

async set_metadata(metadata)[source]
Parameters:

metadata (str)

async mute_participant(participant_id)[source]
Parameters:

participant_id (str)

async unmute_participant(participant_id)[source]
Parameters:

participant_id (str)

async send_message(message, participant_id=None)[source]
Parameters:
  • message (str)

  • participant_id (str | None)

async send_message_urgent(message, participant_id=None)[source]
Parameters:
  • message (str)

  • participant_id (str | None)

async on_room_event(event)[source]
async on_participant_event(event)[source]
async on_track_event(event)[source]