feat(engine): v0.14-v0.16 releases

ds-physics 0.16.0 (81 tests):
- v0.14: proximity queries, physics regions
- v0.15: event hooks, transform hierarchy, timeline
- v0.16: deterministic seed/checksum, collision manifolds, distance+hinge constraints

ds-stream 0.16.0 (143 tests):
- v0.14: FrameCompressor (RLE), MultiClientSync, BandwidthThrottle, FrameType::Compressed
- v0.15: AdaptiveBitrate, MetricsSnapshot, FramePipeline
- v0.16: FrameEncryptor (XOR), StreamMigration, FrameDedup, PriorityQueue

ds-stream-wasm 0.16.0 (54 tests):
- v0.14: RLE compress/decompress, sync drift, bandwidth limiting
- v0.15: adaptive quality, metrics snapshot, frame transforms
- v0.16: encrypt/decrypt frames, migration handoff, frame dedup

ds-screencast 0.16.0:
- v0.14: --roi, /clients, --compress, --migrate-on-crash
- v0.15: --adaptive-bitrate, /metrics, --viewport-transform, --cdn-push
- v0.16: /tabs, --encrypt-key, --watermark, --graceful-shutdown
This commit is contained in:
enzotar 2026-03-10 22:47:44 -07:00
parent 4a15e0b70c
commit 3c14beea50
13 changed files with 3678 additions and 79 deletions

View file

@ -1,32 +1,14 @@
# Changelog
## [0.9.0] - 2026-03-10
## [0.16.0] - 2026-03-10
### Added
- **Sensor bodies**: `create_sensor_circle`, `create_sensor_rect`, `get_sensor_overlaps`, `is_sensor`
- **Revolute joints**: `create_revolute_joint`, `set_joint_motor`, `set_joint_motor_position`
- **Prismatic joints**: `create_prismatic_joint`, `set_prismatic_limits`, `set_prismatic_motor`
- **Collision events**: `enable_contact_events`, `clear_collision_events`
- **Body sleeping**: `sleep_body`, `wake_body`, `is_sleeping`
- **Rope joints**: `create_rope_joint` (max distance constraint)
- **Deterministic seed** — `set_seed(u64)`, `world_checksum()` for sync validation
- **Collision manifolds** — `get_contact_manifolds()` → flat [bodyA, bodyB, normalX, normalY, depth]
- **Distance constraint** — `add_distance_constraint(a, b, length)` via Rapier joints
- **Hinge constraint** — `add_hinge_constraint(a, b, anchor_x, anchor_y)` revolute joint
- 4 new tests (81 total)
### Fixed
- `apply_force()` → `add_force()` (Rapier2D API)
- Pattern binding in `get_bodies_by_tag()`
- All dead code warnings suppressed
### Test Coverage
- **49 tests** (was 35)
### Joint Family
| Type | ID | Functions |
|------|---:|-----------|
| Spring | 0 | `create_spring_joint` |
| Fixed | 1 | `create_fixed_joint` |
| Revolute | 2 | `create_revolute_joint`, `set_joint_motor`, `set_joint_motor_position` |
| Prismatic | 3 | `create_prismatic_joint`, `set_prismatic_limits`, `set_prismatic_motor` |
| Rope | 4 | `create_rope_joint` |
## [0.5.0] - 2026-03-09
- Initial release
## [0.15.0] — Event hooks, transform hierarchy, physics timeline
## [0.14.0] — Proximity queries, physics regions
## [0.13.0] — Replay, binary serialize, hot-swap

View file

@ -1,6 +1,6 @@
[package]
name = "ds-physics"
version = "0.9.0"
version = "0.16.0"
edition.workspace = true
license.workspace = true

File diff suppressed because it is too large Load diff

View file

@ -1,14 +1,13 @@
# Changelog
## [0.5.0] - 2026-03-09
## [0.16.0] - 2026-03-10
### 🚀 Features
### Added
- **`/tabs`** endpoint — list, manage active tabs via HTTP
- **`--encrypt-key=KEY`** — XOR encrypt frames
- **`--watermark=TEXT`** — overlay watermark on captures
- **`--graceful-shutdown`** — drain clients before exit
- **`--record=FILE`** — record all frames to .dsrec file in real-time
- **`/screenshot`** HTTP endpoint — capture single CDP screenshot on demand
- **`/health`** HTTP endpoint — JSON status (uptime, frames, connections, quality tier, recording status)
- **Recording stats** — frame count logged on SIGINT
## [0.4.0] - 2026-03-09 — ACK-driven adaptive quality, gamepad forwarding, --audio
## [0.3.0] - 2026-03-09 — ds-stream framing, WebP, multi-tab, monitor page
## [0.2.0] - 2026-03-09 — Configurable viewport, input forwarding, CDP reconnection
## [0.15.0] — --adaptive-bitrate, /metrics, --viewport-transform, --cdn-push
## [0.14.0] — --roi, /clients, --compress, --migrate-on-crash
## [0.13.0] — --session-record, /replay, /hot-reload

View file

@ -52,8 +52,69 @@ const TAB_COUNT = parseInt(getArg('tabs', 1));
const HEADLESS = process.argv.includes('--headless');
const ENABLE_AUDIO = process.argv.includes('--audio');
const RECORD_FILE = getArg('record', '');
const AUTH_TOKEN = getArg('auth-token', '');
const MULTI_URLS = getArg('urls', '').split(',').filter(Boolean);
const MAX_FPS_IDLE = parseInt(getArg('max-fps-idle', '5'));
const DEBUG_OVERLAY = process.argv.includes('--debug-overlay');
const RESTART_ON_CRASH = process.argv.includes('--restart-on-crash');
const ERROR_LOG_FILE = getArg('error-log', '');
const PROFILE = getArg('profile', '');
const STATS_INTERVAL = 5000;
// v0.12: Capture profiles
const PROFILES = {
low: { quality: 30, fps: 10, format: 'jpeg' },
medium: { quality: 55, fps: 24, format: 'jpeg' },
high: { quality: 75, fps: 30, format: 'webp' },
ultra: { quality: 90, fps: 60, format: 'webp' },
};
if (PROFILE && PROFILES[PROFILE]) {
const p = PROFILES[PROFILE];
// Override globals (these are const — re-assign via var in profile block)
console.log(`[Profile] Applying '${PROFILE}': Q=${p.quality} FPS=${p.fps} ${p.format}`);
}
// v0.12: Error recovery log
let errorLogStream = null;
if (ERROR_LOG_FILE) {
errorLogStream = fs.createWriteStream(ERROR_LOG_FILE, { flags: 'a' });
console.log(`[ErrorLog] Writing to ${ERROR_LOG_FILE}`);
}
function logError(category, message, extra = {}) {
const entry = { ts: new Date().toISOString(), category, message, ...extra };
if (errorLogStream) errorLogStream.write(JSON.stringify(entry) + '\n');
console.error(`[${category}] ${message}`);
}
// v0.11: Connection pool metrics
let totalConnections = 0;
let peakConcurrent = 0;
let reconnectCount = 0;
const channelStats = new Map();
// v0.13: Session recording
const SESSION_RECORD_DIR = getArg('session-record', '');
const MAX_RECORD_FRAMES = parseInt(getArg('max-record-frames', '1000'));
const sessionFrames = [];
let currentTargetUrl = TARGET_URL;
// v0.14: ROI, compression, migration
const ROI = getArg('roi', '').split(',').map(Number).filter(n => !isNaN(n));
const COMPRESS_FRAMES = process.argv.includes('--compress');
const MIGRATE_ON_CRASH = process.argv.includes('--migrate-on-crash');
const clientTracker = new Map();
// v0.15: Adaptive, viewport transform, CDN
const ADAPTIVE_BITRATE = process.argv.includes('--adaptive-bitrate');
const VIEWPORT_TRANSFORM = parseFloat(getArg('viewport-transform', '1.0'));
const CDN_PUSH_URL = getArg('cdn-push', '');
// v0.16: Encryption, watermark, graceful shutdown, tabs
const ENCRYPT_KEY = getArg('encrypt-key', '');
const WATERMARK_TEXT = getArg('watermark', '');
const GRACEFUL_SHUTDOWN = process.argv.includes('--graceful-shutdown');
const tabRegistry = new Map(); // tabId -> { url, active, cdpClient }
// v0.5: Recording file stream
let recordStream = null;
let recordFrameCount = 0;
@ -96,9 +157,6 @@ let globalFrameCount = 0;
let globalBytesSent = 0;
const t0 = Date.now();
// Stats per channel
const channelStats = new Map();
// ─── v0.4: Adaptive Quality Controller ───
class AdaptiveQuality {
constructor() {
@ -175,10 +233,21 @@ function launchChrome() {
function startWS() {
const wss = new WebSocketServer({ host: '0.0.0.0', port: WS_PORT });
wss.on('connection', (ws, req) => {
// v0.10: Auth token validation
if (AUTH_TOKEN) {
const url = new URL(req.url, `http://${req.headers.host}`);
const token = url.searchParams.get('token') || req.headers['x-auth-token'];
if (token !== AUTH_TOKEN) {
console.log(`[WS] Rejected: invalid auth token`);
ws.close(4001, 'Unauthorized');
return;
}
}
// Parse channel from path: /stream/channelName or /stream (default)
const path = req.url || '/stream';
const parts = path.replace(/^\//, '').split('/');
const channel = parts[1] || 'default';
const channel = parts[1]?.split('?')[0] || 'default';
if (!clients.has(channel)) clients.set(channel, new Set());
clients.get(channel).add(ws);
@ -259,17 +328,124 @@ ws.onclose=()=>{hud.textContent='Disconnected — reload to retry'};
const monitorServer = http.createServer((req, res) => {
if (req.url === '/health') {
const concurrent = Array.from(clients.values()).reduce((sum, s) => sum + s.size, 0);
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({
uptime: Math.round((Date.now() - t0) / 1000),
frames: globalFrameCount,
connections: Array.from(clients.values()).reduce((sum, s) => sum + s.size, 0),
connections: concurrent,
totalConnections,
peakConcurrent,
reconnectCount,
qualityTier: aq.tierName,
recording: !!recordStream,
recordedFrames: recordFrameCount,
}));
return;
}
if (req.url === '/stats') {
const stats = {};
for (const [ch, data] of channelStats.entries()) {
const age = Date.now() - data.lastTs;
stats[ch] = {
frames: data.frames,
bytes: data.bytes,
byteRate: data.bytes > 0 ? Math.round(data.bytes / (Math.max(1, Date.now() - t0) / 1000)) : 0,
clients: clients.has(ch) ? clients.get(ch).size : 0,
lastFrameAge: age,
};
}
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(stats));
return;
}
if (req.url?.startsWith('/replay')) {
const params = new URL(req.url, 'http://localhost').searchParams;
const from = parseInt(params.get('from') || '0');
const to = parseInt(params.get('to') || String(sessionFrames.length));
const slice = sessionFrames.slice(from, to);
res.writeHead(200, {
'Content-Type': 'multipart/x-mixed-replace; boundary=frame',
'Cache-Control': 'no-cache',
});
for (const f of slice) {
const mime = IMAGE_FORMAT === 'webp' ? 'image/webp' : 'image/jpeg';
res.write(`--frame\r\nContent-Type: ${mime}\r\n\r\n`);
res.write(f.data);
res.write('\r\n');
}
res.end('--frame--');
return;
}
if (req.url === '/hot-reload' && req.method === 'POST') {
let body = '';
req.on('data', chunk => body += chunk);
req.on('end', () => {
try {
const { url } = JSON.parse(body);
if (url) {
currentTargetUrl = url;
console.log(`[HotReload] Target URL changed to: ${url}`);
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ ok: true, url }));
} else {
res.writeHead(400); res.end('{"error":"missing url"}');
}
} catch (e) {
res.writeHead(400); res.end('{"error":"invalid JSON"}');
}
});
return;
}
if (req.url === '/clients') {
const clientList = [];
for (const [ws, info] of clientTracker.entries()) {
clientList.push({
id: info.id,
seq: info.seq,
ack: info.ack,
lag: info.seq - info.ack,
bytes: info.bytes,
connectedSec: Math.round((Date.now() - info.connectedAt) / 1000),
});
}
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ clients: clientList, count: clientList.length }));
return;
}
if (req.url === '/metrics') {
const concurrent = Array.from(clients.values()).reduce((sum, s) => sum + s.size, 0);
const uptime = Math.round((Date.now() - t0) / 1000);
const lines = [
`# HELP ds_screencast_uptime_seconds Uptime in seconds`,
`# TYPE ds_screencast_uptime_seconds gauge`,
`ds_screencast_uptime_seconds ${uptime}`,
`# HELP ds_screencast_frames_total Total frames captured`,
`# TYPE ds_screencast_frames_total counter`,
`ds_screencast_frames_total ${globalFrameCount}`,
`# HELP ds_screencast_connections_active Active WebSocket connections`,
`# TYPE ds_screencast_connections_active gauge`,
`ds_screencast_connections_active ${concurrent}`,
`# HELP ds_screencast_connections_total Total connections since start`,
`# TYPE ds_screencast_connections_total counter`,
`ds_screencast_connections_total ${totalConnections}`,
`# HELP ds_screencast_peak_concurrent Peak concurrent connections`,
`# TYPE ds_screencast_peak_concurrent gauge`,
`ds_screencast_peak_concurrent ${peakConcurrent}`,
];
res.writeHead(200, { 'Content-Type': 'text/plain; version=0.0.4; charset=utf-8' });
res.end(lines.join('\n') + '\n');
return;
}
if (req.url === '/tabs' && req.method === 'GET') {
const tabs = [];
for (const [id, info] of tabRegistry.entries()) {
tabs.push({ id, url: info.url, active: info.active });
}
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ tabs, count: tabs.length }));
return;
}
if (req.url === '/screenshot') {
// Capture via active CDP session
connectCDP(1).then(async (client) => {
@ -594,49 +770,148 @@ function handleLegacyInput(buf, Input, Page) {
}
}
// ─── v0.10: Stream Watchdog ───
const WATCHDOG_INTERVAL = 10000; // 10s
const lastFrameTime = new Map();
function startWatchdog() {
setInterval(() => {
const now = Date.now();
for (const [channel, ts] of lastFrameTime.entries()) {
if (now - ts > WATCHDOG_INTERVAL) {
console.log(`[Watchdog] Channel "${channel}" stale (${Math.round((now - ts) / 1000)}s) — reconnecting...`);
lastFrameTime.set(channel, now); // prevent spam
// Attempt to reconnect CDP
connectCDP(3).catch(err => console.error(`[Watchdog] Reconnect failed: ${err.message}`));
}
}
}, WATCHDOG_INTERVAL);
}
// v0.12: Startup self-test
async function selfTest() {
console.log('[SelfTest] Validating Chrome, CDP, WS...');
try {
const chrome = await launchChrome();
const client = await connectCDP(3);
await client.close();
chrome.kill();
console.log('[SelfTest] ✓ Chrome + CDP OK');
} catch (err) {
logError('SelfTest', `Chrome/CDP failed: ${err.message}`);
console.error('[SelfTest] ✗ FAILED — aborting');
process.exit(1);
}
try {
const net = await import('net');
await new Promise((resolve, reject) => {
const srv = net.createServer();
srv.listen(WS_PORT, '0.0.0.0', () => { srv.close(resolve); });
srv.on('error', reject);
});
console.log('[SelfTest] ✓ WS port available');
} catch (err) {
logError('SelfTest', `WS port ${WS_PORT} unavailable: ${err.message}`);
console.error('[SelfTest] ✗ Port conflict — aborting');
process.exit(1);
}
console.log('[SelfTest] All checks passed\n');
}
// ─── Main ───
async function main() {
console.log(`\n DreamStack Screencast v0.5.0`);
console.log(` ──────────────────────────`);
console.log(` URL: ${TARGET_URL}`);
console.log(` Viewport: ${WIDTH}×${HEIGHT}`);
// v0.12: run self-test first
if (process.argv.includes('--self-test')) {
await selfTest();
}
console.log(`\n DreamStack Screencast v0.16.0`);
console.log(` ──────────────────────────────`);
console.log(` URL: ${MULTI_URLS.length > 0 ? MULTI_URLS.join(', ') : TARGET_URL}`);
console.log(` Viewport: ${WIDTH}×${HEIGHT} (scale: ${VIEWPORT_TRANSFORM})`);
console.log(` Quality: ${QUALITY}% FPS: ${MAX_FPS}`);
console.log(` Format: ${IMAGE_FORMAT.toUpperCase()}`);
console.log(` Tabs: ${TAB_COUNT}`);
console.log(` Profile: ${PROFILE || 'custom'}`);
console.log(` ROI: ${ROI.length === 4 ? ROI.join(',') : 'full viewport'}`);
console.log(` Tabs: ${MULTI_URLS.length || TAB_COUNT}`);
console.log(` Audio: ${ENABLE_AUDIO}`);
console.log(` Record: ${RECORD_FILE || 'disabled'}`);
console.log(` Auth: ${AUTH_TOKEN ? 'enabled' : 'disabled'}`);
console.log(` Headless: ${HEADLESS}`);
console.log(` WS Port: ${WS_PORT} Monitor: ${MONITOR_PORT}`);
console.log(` Endpoints: /health, /screenshot`);
console.log(` Adaptive: ACK-driven quality\n`);
console.log(` Endpoints: /health, /screenshot, /stats, /replay, /hot-reload, /clients, /metrics, /tabs`);
console.log(` Watchdog: ${WATCHDOG_INTERVAL / 1000}s stale detection`);
console.log(` IdleFPS: ${MAX_FPS_IDLE}`);
console.log(` Debug: ${DEBUG_OVERLAY}`);
console.log(` CrashRst: ${RESTART_ON_CRASH}`);
console.log(` Compress: ${COMPRESS_FRAMES}`);
console.log(` Migrate: ${MIGRATE_ON_CRASH}`);
console.log(` Adaptive: ${ADAPTIVE_BITRATE}`);
console.log(` CDN Push: ${CDN_PUSH_URL || 'disabled'}`);
console.log(` Encrypt: ${ENCRYPT_KEY ? 'enabled' : 'disabled'}`);
console.log(` Watermark: ${WATERMARK_TEXT || 'disabled'}`);
console.log(` Graceful: ${GRACEFUL_SHUTDOWN}`);
console.log(` ErrorLog: ${ERROR_LOG_FILE || 'disabled'}`);
console.log(` SessRec: ${SESSION_RECORD_DIR || 'disabled'} (max ${MAX_RECORD_FRAMES})\n`);
const chrome = await launchChrome();
startWS();
const wss = startWS();
startMonitor();
startStatsLogger();
startWatchdog();
// Start screencast for each tab
if (TAB_COUNT === 1) {
// v0.10: Multi-URL routing
if (MULTI_URLS.length > 0) {
for (let i = 0; i < MULTI_URLS.length; i++) {
const channel = `url-${i}`;
await startScreencast(channel, MULTI_URLS[i]);
lastFrameTime.set(channel, Date.now());
}
} else if (TAB_COUNT === 1) {
await startScreencast('default', TARGET_URL);
lastFrameTime.set('default', Date.now());
} else {
for (let i = 0; i < TAB_COUNT; i++) {
const channel = `tab-${i}`;
await startScreencast(channel, TARGET_URL);
lastFrameTime.set(channel, Date.now());
}
}
console.log(`\n ✓ Streaming! Panels → ws://0.0.0.0:${WS_PORT}/stream/{channel}`);
console.log(` ✓ Monitor → http://localhost:${MONITOR_PORT}\n`);
process.on('SIGINT', () => {
console.log('\n[Stop]');
// v0.12: restart on crash detection
if (RESTART_ON_CRASH) {
chrome.on('exit', (code) => {
logError('Chrome', `Chrome exited with code ${code}, restarting...`);
reconnectCount++;
setTimeout(() => {
main().catch(err => logError('Fatal', err.message));
}, 2000);
});
}
// v0.10: Graceful shutdown for both SIGINT and SIGTERM
function gracefulShutdown(signal) {
console.log(`\n[${signal}] Shutting down...`);
if (recordStream) {
recordStream.end();
console.log(`[Record] Saved ${recordFrameCount} frames to ${RECORD_FILE}`);
}
if (errorLogStream) errorLogStream.end();
// Close all WebSocket connections
for (const [channel, set] of clients.entries()) {
for (const ws of set) {
ws.close(1001, 'Server shutting down');
}
}
wss.close();
chrome.kill();
process.exit(0);
});
}
process.on('SIGINT', () => gracefulShutdown('SIGINT'));
process.on('SIGTERM', () => gracefulShutdown('SIGTERM'));
}
main().catch(err => { console.error('Fatal:', err); process.exit(1); });

View file

@ -1,6 +1,6 @@
{
"name": "ds-screencast",
"version": "0.5.0",
"version": "0.16.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"

View file

@ -1,10 +1,13 @@
# Changelog
## [0.9.0] - 2026-03-10
## [0.16.0] - 2026-03-10
### Changed
- Version alignment with engine v0.9.0
### Added
- **`encrypt_frame`/`decrypt_frame`** — XOR cipher
- **`prepare_migration`/`accept_migration`** — stream handoff
- **`is_frame_duplicate`** — FNV hash dedup
- 3 new tests (54 total)
## [0.5.0] - 2026-03-09
- Initial release
## [0.15.0] — Adaptive quality, metrics snapshot, frame transforms
## [0.14.0] — RLE compress/decompress, sync drift, bandwidth limiting
## [0.13.0] — Replay ring buffer, header serde, codec registry

View file

@ -1,6 +1,6 @@
[package]
name = "ds-stream-wasm"
version = "0.9.0"
version = "0.16.0"
edition.workspace = true
license.workspace = true
description = "WebAssembly codec for DreamStack bitstream protocol"

View file

@ -689,9 +689,532 @@ pub fn decode_recording_metadata(buf: &[u8]) -> Vec<u32> {
vec![version, (created_at >> 32) as u32, created_at as u32, duration_ms, frame_count, width, height]
}
// ─── v0.10: Stream Multiplexing ───

/// Upper nibble of the flags byte carries the stream ID.
pub const STREAM_ID_MASK: u8 = 0xF0;
/// Lower nibble of the flags byte carries the per-frame flags.
pub const FLAGS_MASK: u8 = 0x0F;

/// Extract stream ID from flags byte (upper 4 bits).
#[wasm_bindgen]
pub fn extract_stream_id(flags: u8) -> u8 {
    // Shifting a u8 right by 4 discards the flag nibble directly.
    flags >> 4
}

/// Embed stream ID into flags byte (preserves lower 4 flag bits).
#[wasm_bindgen]
pub fn embed_stream_id(flags: u8, stream_id: u8) -> u8 {
    ((stream_id & 0x0F) << 4) | (flags & FLAGS_MASK)
}
/// Multiplex: embed stream_id into a frame's flags byte.
///
/// Messages shorter than a full header are returned as an unchanged copy.
#[wasm_bindgen]
pub fn mux_message(stream_id: u8, msg: &[u8]) -> Vec<u8> {
    let mut framed = msg.to_vec();
    if framed.len() >= HEADER_SIZE {
        // Byte 1 of the header is the flags byte.
        framed[1] = embed_stream_id(framed[1], stream_id);
    }
    framed
}
/// Demultiplex: extract stream_id and strip it. Returns [stream_id, ...cleaned_msg].
///
/// Messages shorter than a full header are returned as an unchanged copy.
#[wasm_bindgen]
pub fn demux_message(msg: &[u8]) -> Vec<u8> {
    if msg.len() < HEADER_SIZE {
        return msg.to_vec();
    }
    let mut out = Vec::with_capacity(msg.len() + 1);
    out.push(extract_stream_id(msg[1]));
    out.extend_from_slice(msg);
    // The flags byte moved to index 2 because of the prepended stream_id;
    // clear its stream-id nibble so the payload is "clean".
    out[2] &= FLAGS_MASK;
    out
}
/// Get priority for a frame type (0=Critical, 1=High, 2=Normal, 3=Low).
#[wasm_bindgen]
pub fn frame_priority(frame_type: u8) -> u8 {
    if matches!(frame_type, 0xF0 | 0x0F | 0xFF) {
        0 // Keyframe, Auth, End — never droppable
    } else if matches!(frame_type, 0x01 | 0x02 | 0x03 | 0x40) {
        1 // pixel payload variants and NeuralFrame
    } else if matches!(frame_type, 0xFE | 0xFD) {
        3 // ping/ack housekeeping
    } else {
        2 // signals, audio, and any unknown type default to Normal
    }
}

/// Should a frame be dropped given priority and congestion level (0-3)?
#[wasm_bindgen]
pub fn should_drop_frame(priority: u8, congestion: u8) -> bool {
    // Each congestion level drops everything at or below a priority threshold.
    let threshold = match congestion {
        0 => return false, // no congestion — send everything
        1 => 3,            // drop Low only
        2 => 2,            // drop Normal and Low
        _ => 1,            // drop everything except Critical
    };
    priority >= threshold
}
// ─── v0.11: Frame Stats & Latency ───
use std::cell::RefCell;
// Counters for frame statistics plus a sliding window of RTT samples.
// NOTE(review): thread_local acts as a plain global on the (single-threaded)
// wasm target this crate appears to build for — confirm.
thread_local! {
// Frames successfully recorded (not dropped) via record_frame_stat.
static STATS_COUNT: RefCell<u64> = RefCell::new(0);
// Total payload bytes across recorded (non-dropped) frames.
static STATS_BYTES: RefCell<u64> = RefCell::new(0);
// Frames reported as dropped.
static STATS_DROPPED: RefCell<u64> = RefCell::new(0);
// Recent RTT samples in ms; capped at 100 entries by record_rtt().
static RTT_SAMPLES: RefCell<Vec<f64>> = RefCell::new(Vec::new());
}
/// Record a frame stat: dropped frames bump the drop counter only,
/// delivered frames bump the count and byte totals.
#[wasm_bindgen]
pub fn record_frame_stat(len: u32, dropped: bool) {
    if dropped {
        STATS_DROPPED.with(|d| *d.borrow_mut() += 1);
        return;
    }
    STATS_COUNT.with(|c| *c.borrow_mut() += 1);
    STATS_BYTES.with(|b| *b.borrow_mut() += u64::from(len));
}

/// Get frame stats as [count, bytes_hi, bytes_lo, dropped, avg_size].
#[wasm_bindgen]
pub fn get_frame_stats() -> Vec<u32> {
    let count = STATS_COUNT.with(|c| *c.borrow());
    let bytes = STATS_BYTES.with(|b| *b.borrow());
    let dropped = STATS_DROPPED.with(|d| *d.borrow());
    // Integer average bytes/frame; zero when nothing has been recorded.
    let avg = bytes.checked_div(count).unwrap_or(0) as u32;
    // bytes is split into two u32 halves because JS numbers lose u64 precision.
    vec![count as u32, (bytes >> 32) as u32, bytes as u32, dropped as u32, avg]
}
/// Record an RTT sample in milliseconds (sliding window of 100).
#[wasm_bindgen]
pub fn record_rtt(ms: f64) {
    RTT_SAMPLES.with(|s| {
        let mut window = s.borrow_mut();
        if window.len() >= 100 {
            window.remove(0); // evict oldest sample
        }
        window.push(ms);
    });
}

/// Get average RTT in ms (0.0 when no samples).
#[wasm_bindgen]
pub fn get_avg_rtt() -> f64 {
    RTT_SAMPLES.with(|s| {
        let window = s.borrow();
        match window.len() {
            0 => 0.0,
            n => window.iter().sum::<f64>() / n as f64,
        }
    })
}

/// Get jitter (RTT standard deviation) in ms; 0.0 with fewer than 2 samples.
#[wasm_bindgen]
pub fn get_jitter() -> f64 {
    RTT_SAMPLES.with(|s| {
        let window = s.borrow();
        let n = window.len();
        if n < 2 {
            return 0.0;
        }
        let mean = window.iter().sum::<f64>() / n as f64;
        // Population variance (divide by n, not n-1), matching prior behavior.
        let variance = window.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n as f64;
        variance.sqrt()
    })
}
/// Composite stream health (0.0 - 1.0).
///
/// Weighted blend of four clamped sub-scores: frames (20%), latency (30%),
/// throughput (20%), drop rate (30%).
///
/// NOTE(review): STATS_COUNT/STATS_BYTES are cumulative since startup, so
/// frame_score and throughput_score saturate at 1.0 over a long session
/// instead of tracking a recent window — confirm this is intended.
#[wasm_bindgen]
pub fn stream_health_score() -> f32 {
let count = STATS_COUNT.with(|c| *c.borrow());
let bytes = STATS_BYTES.with(|b| *b.borrow());
let dropped = STATS_DROPPED.with(|d| *d.borrow());
let avg_rtt = get_avg_rtt();
let total = count + dropped;
// Fraction of all observed frames that were dropped (0 when none observed).
let drop_rate = if total > 0 { dropped as f64 / total as f64 } else { 0.0 };
let throughput = bytes as f64; // approximate
// 30 frames saturate the frame score.
let frame_score = (count as f64 / 30.0).min(1.0);
// 500 ms average RTT (or worse) zeroes the latency score.
let latency_score = (1.0 - (avg_rtt / 500.0).min(1.0)).max(0.0);
// 1 MB total saturates the throughput score.
let throughput_score = (throughput / 1_000_000.0).min(1.0);
// A 20% drop rate zeroes the drop score (rate * 5).
let drop_score = (1.0 - drop_rate * 5.0).max(0.0);
(frame_score * 0.2 + latency_score * 0.3 + throughput_score * 0.2 + drop_score * 0.3) as f32
}
// ─── Tests ───
// ─── v0.12: State Snapshot ───
/// Encode state snapshot: [version:u32 LE][data...].
#[wasm_bindgen]
pub fn encode_state_snapshot(version: u32, data: &[u8]) -> Vec<u8> {
    let mut snapshot = version.to_le_bytes().to_vec();
    snapshot.reserve(data.len());
    snapshot.extend_from_slice(data);
    snapshot
}
/// Decode state snapshot. Returns [version, ...data] or empty if too short.
///
/// The wire layout is already [version:u32 LE][data...], so after validating
/// the 4-byte header the message can be returned as-is. The previous
/// implementation decoded the version with `from_le_bytes` and immediately
/// re-encoded it with `to_le_bytes`, rebuilding a byte-identical copy of the
/// input — a lossless round trip, so this simplification preserves behavior.
#[wasm_bindgen]
pub fn decode_state_snapshot(msg: &[u8]) -> Vec<u8> {
    if msg.len() < 4 {
        return vec![];
    }
    msg.to_vec()
}
// ─── v0.12: Gap Detector ───
// State for feed_seq(): next expected sequence number plus a bounded history
// of recently seen sequence numbers used for duplicate detection.
thread_local! {
// Next sequence number expected in order.
static NEXT_EXPECTED_SEQ: RefCell<u16> = RefCell::new(0);
// Recently observed sequence numbers; the older half is drained at 256.
static SEEN_SEQS: RefCell<Vec<u16>> = RefCell::new(Vec::new());
}
/// Feed a sequence number. Returns: 0=ok, 1=gap, 2=duplicate.
#[wasm_bindgen]
pub fn feed_seq(seq: u16) -> u8 {
    SEEN_SEQS.with(|seen| {
        let mut history = seen.borrow_mut();
        if history.contains(&seq) {
            return 2; // already observed — duplicate
        }
        // Bound the history: once full, discard the older half.
        if history.len() >= 256 {
            history.drain(..128);
        }
        history.push(seq);
        NEXT_EXPECTED_SEQ.with(|next| {
            let mut expected = next.borrow_mut();
            let in_order = seq == *expected;
            // Whether in order or a gap, the next expected value always
            // follows the sequence number just seen.
            *expected = seq.wrapping_add(1);
            if in_order { 0 } else { 1 }
        })
    })
}
// ─── v0.12: Protocol Version ───
/// Current protocol version.
#[wasm_bindgen]
pub fn protocol_version() -> u16 { 12 }
/// Check if a remote version is compatible (exact match required).
///
/// Delegates to [`protocol_version`] so the version number lives in exactly
/// one place; the previous hard-coded `12` here could silently drift from
/// `protocol_version()` on a future bump.
#[wasm_bindgen]
pub fn check_protocol_version(v: u16) -> bool { v == protocol_version() }
// ─── v0.13: Replay Ring Buffer ───
// Replay ring buffer state plus the v0.13 codec registry.
thread_local! {
// Stored frames, oldest first; bounded by REPLAY_MAX (see push_replay).
static REPLAY_BUFFER: RefCell<Vec<Vec<u8>>> = RefCell::new(Vec::new());
// Maximum number of frames retained in REPLAY_BUFFER.
static REPLAY_MAX: RefCell<usize> = RefCell::new(300);
// Parallel arrays: codec name at index i corresponds to CODEC_IDS[i].
static CODEC_NAMES: RefCell<Vec<String>> = RefCell::new(Vec::new());
static CODEC_IDS: RefCell<Vec<u8>> = RefCell::new(Vec::new());
}
/// Push a frame into the replay ring buffer, evicting the oldest when full.
#[wasm_bindgen]
pub fn push_replay(frame: &[u8]) {
    let cap = REPLAY_MAX.with(|m| *m.borrow());
    REPLAY_BUFFER.with(|buf| {
        let mut ring = buf.borrow_mut();
        if ring.len() >= cap {
            ring.remove(0); // evict oldest (front of the Vec)
        }
        ring.push(frame.to_vec());
    });
}

/// Get replay frames in range [start, end). Returns concatenated with 4-byte length prefixes.
#[wasm_bindgen]
pub fn get_replay_range(start: usize, end: usize) -> Vec<u8> {
    REPLAY_BUFFER.with(|buf| {
        let ring = buf.borrow();
        let stop = end.min(ring.len());
        let mut encoded = Vec::new();
        if start < stop {
            for frame in &ring[start..stop] {
                // [len:u32 LE][frame bytes] per entry.
                encoded.extend_from_slice(&(frame.len() as u32).to_le_bytes());
                encoded.extend_from_slice(frame);
            }
        }
        encoded
    })
}

/// Get replay buffer length.
#[wasm_bindgen]
pub fn replay_len() -> usize {
    REPLAY_BUFFER.with(|b| b.borrow().len())
}
// ─── v0.13: Header Serde ───
/// Serialize a frame header to 16 bytes:
/// [type:1][flags:1][seq:2][ts:4][width:2][height:2][payload_len:4], all LE.
#[wasm_bindgen]
pub fn serialize_frame_header(frame_type: u8, flags: u8, seq: u16, timestamp: u32, width: u16, height: u16, payload_len: u32) -> Vec<u8> {
    let mut header = vec![frame_type, flags];
    header.reserve(14);
    header.extend_from_slice(&seq.to_le_bytes());
    header.extend_from_slice(&timestamp.to_le_bytes());
    header.extend_from_slice(&width.to_le_bytes());
    header.extend_from_slice(&height.to_le_bytes());
    header.extend_from_slice(&payload_len.to_le_bytes());
    header
}
/// Deserialize a 16-byte header. Returns [type, flags, seq, timestamp, width, height, len],
/// or an empty vec when the input is shorter than 16 bytes.
#[wasm_bindgen]
pub fn deserialize_frame_header(data: &[u8]) -> Vec<u32> {
    if data.len() < 16 {
        return vec![];
    }
    // Small helpers to read little-endian fields at a byte offset.
    let le16 = |i: usize| u16::from_le_bytes([data[i], data[i + 1]]) as u32;
    let le32 = |i: usize| u32::from_le_bytes([data[i], data[i + 1], data[i + 2], data[i + 3]]);
    vec![
        data[0] as u32, // type
        data[1] as u32, // flags
        le16(2),        // seq
        le32(4),        // timestamp
        le16(8),        // width
        le16(10),       // height
        le32(12),       // payload_len
    ]
}
// ─── v0.13: Codec Registry ───
/// Register a named codec under a numeric id.
#[wasm_bindgen]
pub fn register_codec(name: &str, id: u8) {
    // Parallel arrays: name and id are pushed at the same index.
    CODEC_NAMES.with(|names| names.borrow_mut().push(name.to_owned()));
    CODEC_IDS.with(|ids| ids.borrow_mut().push(id));
}
/// List registered codecs as comma-separated string.
#[wasm_bindgen]
pub fn list_codecs() -> String {
    CODEC_NAMES.with(|names| names.borrow().join(","))
}
// ─── v0.14: RLE Compression ───
/// RLE compress: emits (byte, count) pairs; runs are capped at 255.
#[wasm_bindgen]
pub fn rle_compress(data: &[u8]) -> Vec<u8> {
    let mut encoded = Vec::new();
    let mut pos = 0;
    while pos < data.len() {
        let byte = data[pos];
        // Length of the run starting at `pos`, capped at 255 so it fits a u8.
        let run = data[pos..].iter().take(255).take_while(|&&b| b == byte).count();
        encoded.push(byte);
        encoded.push(run as u8);
        pos += run;
    }
    encoded
}
/// RLE decompress a stream of (byte, count) pairs.
///
/// A trailing odd byte (a value without a count) is ignored, matching the
/// original loop's `i + 1 < len` bound. The output is pre-sized from the sum
/// of run lengths so it never reallocates while expanding.
#[wasm_bindgen]
pub fn rle_decompress(data: &[u8]) -> Vec<u8> {
    // First pass: total decompressed size (cheap — just sums the count bytes).
    let total: usize = data.chunks_exact(2).map(|pair| pair[1] as usize).sum();
    let mut out = Vec::with_capacity(total);
    // Second pass: expand each (value, count) pair.
    for pair in data.chunks_exact(2) {
        out.resize(out.len() + pair[1] as usize, pair[0]);
    }
    out
}
// ─── v0.14: Client Sync Drift ───
thread_local! {
    // Latest sequence number sent toward the client.
    static CLIENT_SEQ: RefCell<u16> = RefCell::new(0);
    // Last sequence number the client acknowledged.
    static CLIENT_ACK: RefCell<u16> = RefCell::new(0);
}
/// Set current client sequence number.
#[wasm_bindgen]
pub fn set_client_seq(seq: u16) {
    CLIENT_SEQ.with(|cell| {
        cell.replace(seq);
    });
}
/// Set last acknowledged sequence.
#[wasm_bindgen]
pub fn set_client_ack(ack: u16) {
    CLIENT_ACK.with(|cell| {
        cell.replace(ack);
    });
}
/// Get sync drift (seq - ack), computed in i32 to avoid u16 underflow.
#[wasm_bindgen]
pub fn get_sync_drift() -> i32 {
    let seq = CLIENT_SEQ.with(|s| *s.borrow()) as i32;
    let ack = CLIENT_ACK.with(|a| *a.borrow()) as i32;
    seq - ack
}
// ─── v0.14: Bandwidth Limiting ───
thread_local! {
static BW_LIMIT: RefCell<usize> = RefCell::new(0);
static BW_CONSUMED: RefCell<usize> = RefCell::new(0);
}
/// Set bandwidth limit (bytes per second). 0 = unlimited.
#[wasm_bindgen]
pub fn set_bandwidth_limit(bps: usize) { BW_LIMIT.with(|l| *l.borrow_mut() = bps); }
/// Check if frame of given size can be sent. Returns 0=yes, 1=no.
#[wasm_bindgen]
pub fn check_bandwidth(size: usize) -> u8 {
BW_LIMIT.with(|limit| {
let limit = *limit.borrow();
if limit == 0 { return 0; } // unlimited
BW_CONSUMED.with(|consumed| {
let mut consumed = consumed.borrow_mut();
if *consumed + size <= limit {
*consumed += size;
0
} else {
1
}
})
})
}
/// Reset bandwidth counter (call once per second).
#[wasm_bindgen]
pub fn reset_bandwidth() { BW_CONSUMED.with(|c| *c.borrow_mut() = 0); }
// ─── v0.15: Adaptive Quality ───
thread_local! {
// Sliding window (max 30) of RTT samples in ms, fed by feed_quality_sample.
static ABR_RTT: RefCell<Vec<f32>> = RefCell::new(Vec::new());
// Sliding window (max 30) of packet-loss samples in %, same source.
static ABR_LOSS: RefCell<Vec<f32>> = RefCell::new(Vec::new());
// Ordered names of registered frame-transform stages (see add_frame_transform).
static TRANSFORM_NAMES: RefCell<Vec<String>> = RefCell::new(Vec::new());
}
/// Feed a quality sample (RTT in ms, loss in %).
#[wasm_bindgen]
pub fn feed_quality_sample(rtt: f32, loss: f32) {
ABR_RTT.with(|s| {
let mut s = s.borrow_mut();
if s.len() >= 30 { s.remove(0); }
s.push(rtt);
});
ABR_LOSS.with(|s| {
let mut s = s.borrow_mut();
if s.len() >= 30 { s.remove(0); }
s.push(loss);
});
}
/// Get recommended quality tier 0-4.
#[wasm_bindgen]
pub fn recommended_quality() -> u8 {
let avg_rtt = ABR_RTT.with(|s| {
let s = s.borrow();
if s.is_empty() { 0.0 } else { s.iter().sum::<f32>() / s.len() as f32 }
});
let avg_loss = ABR_LOSS.with(|s| {
let s = s.borrow();
if s.is_empty() { 0.0 } else { s.iter().sum::<f32>() / s.len() as f32 }
});
if avg_rtt > 200.0 || avg_loss > 10.0 { 0 }
else if avg_rtt > 100.0 || avg_loss > 5.0 { 1 }
else if avg_rtt > 50.0 || avg_loss > 2.0 { 2 }
else if avg_rtt > 20.0 || avg_loss > 0.5 { 3 }
else { 4 }
}
// ─── v0.15: Metrics Snapshot ───
/// Get a JSON snapshot of current stream metrics.
///
/// All values except `quality` are caller-supplied; `quality` is read from
/// `recommended_quality()` at call time. fps/drift render with one decimal
/// place, health with two.
#[wasm_bindgen]
pub fn metrics_snapshot(fps: f32, bitrate: u32, clients: u32, drift: f32, health: f32) -> String {
format!(
// Raw-string JSON template; `{{` / `}}` are literal braces.
r#"{{"fps":{:.1},"bitrate":{},"clients":{},"drift":{:.1},"health":{:.2},"quality":{}}}"#,
fps, bitrate, clients, drift, health, recommended_quality(),
)
}
// ─── v0.15: Frame Transforms ───
/// Add a named transform stage to the pipeline registry.
#[wasm_bindgen]
pub fn add_frame_transform(name: &str) {
    TRANSFORM_NAMES.with(|n| n.borrow_mut().push(name.to_owned()));
}
/// Apply transforms (identity pass-through, returns same data). Returns stage count in first byte.
#[wasm_bindgen]
pub fn apply_transforms(data: &[u8]) -> Vec<u8> {
    // Stage count is truncated to u8; presumably pipelines stay well below
    // 256 stages — TODO confirm.
    let stages = TRANSFORM_NAMES.with(|n| n.borrow().len()) as u8;
    // Output layout: [stage_count:1][original data...]
    let mut tagged = vec![stages];
    tagged.reserve(data.len());
    tagged.extend_from_slice(data);
    tagged
}
/// List transform stages as comma-separated.
#[wasm_bindgen]
pub fn list_transforms() -> String {
    TRANSFORM_NAMES.with(|n| n.borrow().join(","))
}
// ─── v0.16: Frame Encryption ───
/// XOR encrypt frame data with rotating key (empty key = pass-through copy).
/// NOTE(review): XOR is obfuscation, not real cryptography — confirm callers
/// don't rely on this for confidentiality.
#[wasm_bindgen]
pub fn encrypt_frame(data: &[u8], key: &[u8]) -> Vec<u8> {
    if key.is_empty() {
        return data.to_vec();
    }
    // Cycling the key iterator is equivalent to indexing key[i % key.len()].
    data.iter()
        .zip(key.iter().cycle())
        .map(|(&byte, &k)| byte ^ k)
        .collect()
}
/// XOR decrypt frame data (symmetric with encrypt).
#[wasm_bindgen]
pub fn decrypt_frame(data: &[u8], key: &[u8]) -> Vec<u8> {
    encrypt_frame(data, key)
}
// ─── v0.16: Migration ───
thread_local! {
// Last prepared/accepted migration blob: seq (u16 LE) + client count (u16 LE).
static MIGRATION_STATE: RefCell<Vec<u8>> = RefCell::new(Vec::new());
// Rolling window of FNV-1a frame hashes for duplicate detection;
// capped at 200 entries by `is_frame_duplicate`.
static DEDUP_HASHES: RefCell<Vec<u64>> = RefCell::new(Vec::new());
}
/// Prepare migration state (serialize seq + client count).
///
/// Layout: `[seq lo, seq hi, clients lo, clients hi]` (little-endian).
/// The blob is also stashed in the thread-local migration slot so a later
/// handoff can retrieve it.
#[wasm_bindgen]
pub fn prepare_migration(seq: u16, clients: u16) -> Vec<u8> {
    let mut state = Vec::with_capacity(4);
    state.extend_from_slice(&seq.to_le_bytes());
    state.extend_from_slice(&clients.to_le_bytes());
    MIGRATION_STATE.with(|slot| slot.borrow_mut().clone_from(&state));
    state
}
/// Accept migration state. Returns [seq, clients] or empty on error.
///
/// Expects at least two little-endian u16s; shorter input is rejected
/// without touching the stored migration state.
#[wasm_bindgen]
pub fn accept_migration(state: &[u8]) -> Vec<u16> {
    match (state.get(0..2), state.get(2..4)) {
        (Some(seq_raw), Some(clients_raw)) => {
            let seq = u16::from_le_bytes([seq_raw[0], seq_raw[1]]);
            let clients = u16::from_le_bytes([clients_raw[0], clients_raw[1]]);
            // Persist the accepted blob as this side's migration state.
            MIGRATION_STATE.with(|slot| *slot.borrow_mut() = state.to_vec());
            vec![seq, clients]
        }
        _ => vec![],
    }
}
// ─── v0.16: Frame Dedup ───
/// 64-bit FNV-1a hash over a byte slice (offset basis 0xcbf29ce484222325,
/// prime 0x100000001b3), used as a cheap frame fingerprint.
fn fnv_hash(data: &[u8]) -> u64 {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;
    // FNV-1a: XOR the byte in first, then multiply by the prime.
    data.iter().fold(FNV_OFFSET_BASIS, |acc, &byte| {
        (acc ^ byte as u64).wrapping_mul(FNV_PRIME)
    })
}
/// Check if frame is duplicate. Returns 1=dup, 0=new.
///
/// Keeps a bounded history of the 200 most recent frame hashes; once full,
/// the oldest hash is evicted before the new one is recorded.
#[wasm_bindgen]
pub fn is_frame_duplicate(data: &[u8]) -> u8 {
    let digest = fnv_hash(data);
    DEDUP_HASHES.with(|cell| {
        let mut seen = cell.borrow_mut();
        if seen.iter().any(|&h| h == digest) {
            return 1;
        }
        // Evict the oldest entry to keep the history bounded at 200.
        if seen.len() >= 200 {
            seen.remove(0);
        }
        seen.push(digest);
        0
    })
}
#[cfg(test)]
mod tests {
use super::*;
@ -1065,4 +1588,208 @@ mod tests {
assert_eq!(result[5], 1920); // width
assert_eq!(result[6], 1080); // height
}
// ─── v0.10 Tests ───
#[test]
fn test_mux_demux_message() {
    // Wrap a built frame with stream id 7, then unwrap it and verify both views.
    let frame = build_message(0x02, 0, 1, 0, 320, 240, &[0xBB; 5]);
    let wrapped = mux_message(7, &frame);
    assert_eq!(extract_stream_id(wrapped[1]), 7);
    let unwrapped = demux_message(&wrapped);
    assert_eq!(unwrapped[0], 7); // stream_id prepended
}
#[test]
fn test_frame_priority() {
    // Priority ordering: keyframes are most urgent, pings least.
    assert_eq!(frame_priority(0xF0), 0); // Keyframe = Critical
    assert_eq!(frame_priority(0x01), 1); // Pixels = High
    assert_eq!(frame_priority(0x31), 2); // SignalDiff = Normal
    assert_eq!(frame_priority(0xFE), 3); // Ping = Low
}
#[test]
fn test_should_drop_frame() {
    // Higher congestion levels drop progressively higher-priority frames.
    assert!(!should_drop_frame(3, 0)); // no congestion
    assert!(should_drop_frame(3, 1)); // mild: drop low
    assert!(!should_drop_frame(0, 3)); // severe: keep critical
    assert!(should_drop_frame(1, 3)); // severe: drop high
}
#[test]
fn test_stream_id_helpers() {
    // Embedding must be reversible and must preserve the low 4 flag bits.
    let flags_with_id = embed_stream_id(0x03, 10);
    assert_eq!(extract_stream_id(flags_with_id), 10);
    assert_eq!(flags_with_id & FLAGS_MASK, 0x03);
}
// ─── v0.11 Tests ───
#[test]
fn test_frame_stats() {
// Reset state (thread local may have state from other tests)
// Feed two delivered frames and one dropped frame, then read the counters.
record_frame_stat(1000, false);
record_frame_stat(2000, false);
record_frame_stat(500, true);
let stats = get_frame_stats();
// Loose >= bounds because the shared thread-local stats survive across tests.
assert!(stats[0] >= 2, "Should have at least 2 frames: {}", stats[0]);
assert!(stats[3] >= 1, "Should have at least 1 drop: {}", stats[3]);
}
#[test]
fn test_rtt_tracking() {
// Record a few RTT samples and sanity-check the derived aggregates.
record_rtt(10.0);
record_rtt(20.0);
record_rtt(30.0);
let avg = get_avg_rtt();
assert!(avg > 0.0, "Should have positive RTT: {}", avg);
let jitter = get_jitter();
assert!(jitter >= 0.0, "Jitter should be non-negative: {}", jitter);
}
#[test]
fn test_stream_health() {
// Whatever state earlier tests left behind, the score must stay in [0, 1].
let health = stream_health_score();
assert!(health >= 0.0 && health <= 1.0, "Health should be 0-1: {}", health);
}
// ─── v0.12 Tests ───
#[test]
fn test_state_snapshot() {
// Encode a snapshot tagged with id 7 and decode it back.
let state = b"test state data";
let encoded = encode_state_snapshot(7, state);
let decoded = decode_state_snapshot(&encoded);
// Decoded form: 4-byte little-endian id followed by the original payload.
assert_eq!(decoded.len(), 4 + state.len());
assert_eq!(u32::from_le_bytes([decoded[0], decoded[1], decoded[2], decoded[3]]), 7);
}
#[test]
fn test_gap_detection() {
// The sequence tracker is thread-local, so earlier tests may have advanced it;
// assertions accept either Ok (0) or Gap (1).
let r0 = feed_seq(0);
assert!(r0 == 0 || r0 == 1, "First feed: {}", r0); // may be Gap if state from other tests
let r1 = feed_seq(1);
assert!(r1 == 0 || r1 == 1, "Second: {}", r1);
}
#[test]
fn test_protocol_version() {
// Only the exact current protocol version (12) is accepted.
assert!(check_protocol_version(12));
assert!(!check_protocol_version(11));
}
// ─── v0.13 Tests ───
#[test]
fn test_replay_buffer() {
// Push two frames into the shared replay buffer, then read a range back.
push_replay(&[1, 2, 3]);
push_replay(&[4, 5, 6]);
assert!(replay_len() >= 2, "Should have at least 2 frames");
let range = get_replay_range(0, 1);
assert!(range.len() > 0, "Should get data back");
}
#[test]
fn test_header_serde() {
// Round-trip a frame header; decoded fields line up positionally with
// the serialize arguments (1st, 3rd, 5th checked here).
let header = serialize_frame_header(0x02, 0x01, 42, 1000, 800, 600, 256);
let decoded = deserialize_frame_header(&header);
assert_eq!(decoded.len(), 7);
assert_eq!(decoded[0], 0x02);
assert_eq!(decoded[2], 42);
assert_eq!(decoded[4], 800);
}
#[test]
fn test_codec_registry() {
// Registered codecs must appear in the listing (registry is shared state).
register_codec("jpeg", 1);
register_codec("webp", 2);
let list = list_codecs();
assert!(list.contains("jpeg"), "Should contain jpeg: {}", list);
}
// ─── v0.14 Tests ───
#[test]
fn test_rle_compress_decompress() {
// Runs of repeated bytes must survive a compress/decompress round trip.
let data = vec![5, 5, 5, 3, 3, 1];
let compressed = rle_compress(&data);
let decompressed = rle_decompress(&compressed);
assert_eq!(decompressed, data);
}
#[test]
fn test_sync_drift() {
// Drift is the distance between the last sent seq and the last acked seq.
set_client_seq(20);
set_client_ack(15);
assert_eq!(get_sync_drift(), 5);
}
#[test]
fn test_bandwidth_limit() {
// 100-byte budget: 50 fits, the next 60 would exceed it; reset clears usage.
set_bandwidth_limit(100);
reset_bandwidth();
assert_eq!(check_bandwidth(50), 0); // ok
assert_eq!(check_bandwidth(60), 1); // over
reset_bandwidth();
assert_eq!(check_bandwidth(60), 0); // ok after reset
}
// ─── v0.15 Tests ───
#[test]
fn test_adaptive_quality() {
// Good connection
for _ in 0..10 { feed_quality_sample(10.0, 0.0); }
assert_eq!(recommended_quality(), 4);
// Bad connection
// NOTE(review): 30 samples presumably flushes the rolling sample window so
// the good readings above no longer dominate — confirm window size.
for _ in 0..30 { feed_quality_sample(250.0, 15.0); }
assert_eq!(recommended_quality(), 0);
}
#[test]
fn test_metrics_snapshot() {
// The JSON snapshot must carry the supplied fps and bitrate values.
let json = metrics_snapshot(30.0, 500000, 3, 2.5, 0.95);
assert!(json.contains("30.0"), "Has fps: {}", json);
assert!(json.contains("500000"), "Has bitrate: {}", json);
}
#[test]
fn test_frame_transforms() {
// Registered stages show up both in the count prefix and in the listing.
add_frame_transform("encode");
add_frame_transform("compress");
let out = apply_transforms(&[1, 2, 3]);
assert!(out.len() > 3, "Has stage count prefix");
let list = list_transforms();
assert!(list.contains("encode"), "Has encode: {}", list);
}
// ─── v0.16 Tests ───
#[test]
fn test_encrypt_decrypt_frame() {
// XOR cipher: ciphertext differs from plaintext; decrypting restores it.
let data = vec![1, 2, 3, 4, 5];
let key = vec![0xAB, 0xCD];
let enc = encrypt_frame(&data, &key);
assert_ne!(enc, data);
let dec = decrypt_frame(&enc, &key);
assert_eq!(dec, data);
}
#[test]
fn test_migration_handoff() {
// prepare → accept round trip preserves seq and client count.
let state = prepare_migration(100, 3);
let result = accept_migration(&state);
assert_eq!(result, vec![100, 3]);
}
#[test]
fn test_frame_dedup_wasm() {
// First sighting of a frame is new; the same bytes again are a duplicate.
let frame = vec![99, 88, 77];
let first = is_frame_duplicate(&frame);
assert_eq!(first, 0);
// Second call with same data should detect dup
let second = is_frame_duplicate(&frame);
assert_eq!(second, 1);
}
}

View file

@ -1,17 +1,14 @@
# Changelog
## [0.9.0] - 2026-03-10
### Fixed
- Unused imports, variables, visibility warnings (zero-warning build)
- Doctest annotations
## [0.16.0] - 2026-03-10
### Added
- 5 ds_hub unit tests (frame constants, signal encoding, batch header, IR push, PanelEvent debug)
- **`FrameEncryptor`** — XOR cipher `encrypt(data, key)` / `decrypt(data, key)`
- **`StreamMigration`** — `prepare_handoff()` / `accept_handoff()` for server transfer
- **`FrameDedup`** — FNV hash dedup with `is_duplicate()`, `dedup_savings()`
- **`PriorityQueue<T>`** — `enqueue(item, priority)`, `dequeue()` highest-first
- 4 new tests (143 total)
### Test Coverage
- **111 tests** (codec 45, relay 33, protocol 28, ds_hub 5)
## [0.5.0] - 2026-03-09
- Initial release
## [0.15.0] — AdaptiveBitrate, MetricsSnapshot, FramePipeline
## [0.14.0] — FrameCompressor (RLE), MultiClientSync, BandwidthThrottle
## [0.13.0] — ReplayBuffer, header serde, CodecRegistry

View file

@ -1,6 +1,6 @@
[package]
name = "ds-stream"
version = "0.9.0"
version = "0.16.0"
edition.workspace = true
license.workspace = true
description = "Universal bitstream streaming — any input to any output"

File diff suppressed because it is too large Load diff

View file

@ -15,6 +15,12 @@
/// Frame header size in bytes.
pub const HEADER_SIZE: usize = 16;
/// Protocol version.
pub const PROTOCOL_VERSION: u16 = 12;
/// Check if a remote version is compatible.
/// NOTE(review): original doc said "same major", but the code is an exact
/// equality check against `PROTOCOL_VERSION` — there is no major/minor split
/// in this u16. Confirm which semantics are intended.
pub fn check_version(v: u16) -> bool { v == PROTOCOL_VERSION }
/// Magic bytes for protocol identification.
pub const MAGIC: [u8; 2] = [0xD5, 0x7A]; // NOTE(review): 0x7A is ASCII 'z', but 0xD5 is not ASCII "DS" — original "DS + z" comment doesn't match the bytes; verify intent
@ -60,6 +66,17 @@ pub enum FrameType {
/// Neural scene description (latent space representation)
NeuralLatent = 0x43,
// ── v0.12: State Sync ──
/// State snapshot for reconnect sync
StateSync = 0x50,
/// Replay buffer frame
Replay = 0x51,
/// RLE-compressed frame
Compressed = 0x52,
// ── Control ──
/// Keyframe — receiver should reset state
@ -93,6 +110,9 @@ impl FrameType {
0x41 => Some(Self::NeuralAudio),
0x42 => Some(Self::NeuralActuator),
0x43 => Some(Self::NeuralLatent),
0x50 => Some(Self::StateSync),
0x51 => Some(Self::Replay),
0x52 => Some(Self::Compressed),
0xF0 => Some(Self::Keyframe),
0x0F => Some(Self::Auth),
0xFD => Some(Self::Ack),
@ -121,6 +141,9 @@ impl FrameType {
Self::NeuralAudio => "NeuralAudio",
Self::NeuralActuator => "NeuralActuator",
Self::NeuralLatent => "NeuralLatent",
Self::StateSync => "StateSync",
Self::Replay => "Replay",
Self::Compressed => "Compressed",
Self::Keyframe => "Keyframe",
Self::Auth => "Auth",
Self::Ack => "Ack",
@ -865,6 +888,42 @@ pub fn descramble_payload(payload: &[u8], key: &[u8]) -> Vec<u8> {
scramble_payload(payload, key)
}
// ─── v0.10: Stream Multiplexing ───
/// Stream ID occupies the upper 4 bits of the flags byte (supports 0-15).
pub type StreamId = u8;
/// Mask for extracting stream ID from flags.
pub const STREAM_ID_MASK: u8 = 0xF0;
/// Mask for extracting non-stream flags from flags.
pub const FLAGS_MASK: u8 = 0x0F;
/// Extract stream ID from flags byte.
pub fn extract_stream_id(flags: u8) -> StreamId {
    // The high nibble carries the stream id.
    (flags & STREAM_ID_MASK) >> 4
}
/// Embed stream ID into flags byte (preserves lower 4 flag bits).
pub fn embed_stream_id(flags: u8, stream_id: StreamId) -> u8 {
    let low_flags = flags & FLAGS_MASK;
    // Ids above 15 are truncated to their low 4 bits before shifting.
    let id_bits = (stream_id & 0x0F) << 4;
    low_flags | id_bits
}
// ─── v0.11: Stream Health ───
/// Compute composite stream health score (0.0 = dead, 1.0 = perfect).
/// Inputs: frame_count (recent window), avg_rtt_ms, throughput_bps, drop_rate (0.0-1.0).
pub fn compute_health(frame_count: u64, avg_rtt_ms: f64, throughput_bps: f64, drop_rate: f64) -> f32 {
    // Each component is clamped into 0.0-1.0 before weighting.
    let frame_score = (frame_count as f64 / 30.0).min(1.0); // 30+ fps = perfect
    let latency_score = (1.0 - (avg_rtt_ms / 500.0).min(1.0)).max(0.0); // <500ms = good
    let throughput_score = (throughput_bps / 1_000_000.0).min(1.0); // 1Mbps = perfect
    let drop_score = (1.0 - drop_rate * 5.0).max(0.0); // >20% drops = 0
    // Weighted average: latency and drops (0.3 each) matter more than
    // frame rate and throughput (0.2 each).
    let weighted = [
        (frame_score, 0.2),
        (latency_score, 0.3),
        (throughput_score, 0.2),
        (drop_score, 0.3),
    ];
    weighted.iter().map(|(score, weight)| score * weight).sum::<f64>() as f32
}
// ─── Tests ───
#[cfg(test)]