Quick Start
Get up and running with Xybrid in minutes
Get Xybrid running and execute your first pipeline in under 5 minutes.
Installation
Add the Xybrid SDK to your project:
# pubspec.yaml
dependencies:
xybrid_flutter: ^0.1.0
Then run: flutter pub get
// Package.swift
dependencies: [
.package(url: "https://github.com/xybrid-ai/xybrid-swift.git", from: "0.1.0")
]
// build.gradle.kts
dependencies {
implementation("ai.xybrid:xybrid-android:0.1.0")
}
// build.gradle.kts
kotlin {
sourceSets {
commonMain.dependencies {
implementation("ai.xybrid:xybrid-kmp:0.1.0")
}
}
}
For CLI installation, see CLI Installation.
Run
Pipeline
Pipelines chain multiple models together. Define a voice-assistant.yaml:
name: voice-assistant
stages:
# Speech recognition (runs on device)
- whisper-tiny@1.0
# Language model (routes to cloud)
- target: integration
provider: openai
model: gpt-4o-mini
# Text-to-speech (runs on device)
- kokoro-82m@0.1
Run it:
import 'package:xybrid_flutter/xybrid_flutter.dart';
await Xybrid.init();
final loader = Xybrid.pipeline('assets/voice-assistant.yaml');
final pipeline = await loader.load();
final audioBytes = await recorder.readAudioBytes(recordingPath);
final envelope = Envelope.audio(
audioBytes: audioBytes,
sampleRate: 16000,
channels: 1,
);
final result = await pipeline.run(envelope: envelope);
await audioPlayer.play(result.audio);
import Xybrid
let xybrid = Xybrid()
let audioData = recorder.stop()
let result = try await xybrid.runPipeline(
pipeline: "voice-assistant",
input: .audio(data: audioData)
)
audioPlayer.play(result.audio)
import ai.xybrid.Xybrid
import ai.xybrid.Envelope
val xybrid = Xybrid()
val audioBytes = recorder.stop()
val result = xybrid.runPipeline(
pipeline = "voice-assistant",
input = Envelope.audio(audioBytes)
)
audioPlayer.play(result.audio)
import ai.xybrid.Xybrid
import ai.xybrid.Envelope
val xybrid = Xybrid()
val audioBytes = recorder.stop()
val result = xybrid.runPipeline(
pipeline = "voice-assistant",
input = Envelope.audio(audioBytes)
)
playAudio(result.audio)
Single Model
Run a single model directly:
final result = await xybrid.runModel(
modelId: 'whisper-tiny',
version: '1.0',
input: Envelope.audio(bytes: audioBytes),
);
print(result.text); // "Hello, how can I help you?"
let result = try await xybrid.runModel(
modelId: "whisper-tiny",
version: "1.0",
input: .audio(data: audioData)
)
print(result.text) // "Hello, how can I help you?"
val result = xybrid.runModel(
modelId = "whisper-tiny",
version = "1.0",
input = Envelope.audio(audioBytes)
)
println(result.text) // "Hello, how can I help you?"
val result = xybrid.runModel(
modelId = "whisper-tiny",
version = "1.0",
input = Envelope.audio(audioBytes)
)
println(result.text) // "Hello, how can I help you?"
Handle Results
try {
final result = await pipeline.run(envelope: envelope);
// Access different result types
if (result.hasText) {
print('Transcription: ${result.text}');
}
if (result.hasAudio) {
await audioPlayer.play(result.audio);
}
} on XybridException catch (e) {
print('Pipeline failed: ${e.message}');
}
do {
let result = try await xybrid.runPipeline(
pipeline: "voice-assistant",
input: .audio(data: audioData)
)
if let text = result.text {
print("Transcription: \(text)")
}
if let audio = result.audio {
audioPlayer.play(audio)
}
} catch let error as XybridError {
print("Pipeline failed: \(error.message)")
}
try {
val result = xybrid.runPipeline(
pipeline = "voice-assistant",
input = Envelope.audio(audioBytes)
)
result.text?.let { println("Transcription: $it") }
result.audio?.let { audioPlayer.play(it) }
} catch (e: XybridException) {
println("Pipeline failed: ${e.message}")
}
try {
val result = xybrid.runPipeline(
pipeline = "voice-assistant",
input = Envelope.audio(audioBytes)
)
result.text?.let { println("Transcription: $it") }
result.audio?.let { playAudio(it) }
} catch (e: XybridException) {
println("Pipeline failed: ${e.message}")
}