Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 8dddbf9

Browse files
authored
Merge pull request #363 from ruvnet/feat/adr-079-camera-ground-truth
feat: camera ground-truth training pipeline with ruvector optimizations (ADR-079)
2 parents b5e924c + 35903a3 commit 8dddbf9

37 files changed

Lines changed: 6361 additions & 30 deletions

docs/adr/ADR-079-camera-ground-truth-training.md

Lines changed: 512 additions & 0 deletions
Large diffs are not rendered by default.

rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/src/inference.rs

Lines changed: 29 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -330,9 +330,36 @@ impl<B: Backend> InferenceEngine<B> {
330330
Ok(result)
331331
}
332332

333-
/// Run batched inference
333+
/// Run batched inference.
334+
///
335+
/// Stacks all inputs along a new batch dimension, runs a single
336+
/// backend call, then splits the output back into individual tensors.
337+
/// Falls back to sequential inference if stack/split fails.
334338
pub fn infer_batch(&self, inputs: &[Tensor]) -> NnResult<Vec<Tensor>> {
335-
inputs.iter().map(|input| self.infer(input)).collect()
339+
if inputs.is_empty() {
340+
return Ok(Vec::new());
341+
}
342+
if inputs.len() == 1 {
343+
return Ok(vec![self.infer(&inputs[0])?]);
344+
}
345+
// Try batched path: stack -> single call -> split
346+
match Tensor::stack(inputs) {
347+
Ok(batched_input) => {
348+
let n = inputs.len();
349+
let batched_output = self.backend.run_single(&batched_input)?;
350+
match batched_output.split(n) {
351+
Ok(outputs) => Ok(outputs),
352+
Err(_) => {
353+
// Fallback: sequential
354+
inputs.iter().map(|input| self.infer(input)).collect()
355+
}
356+
}
357+
}
358+
Err(_) => {
359+
// Fallback: sequential if shapes are incompatible
360+
inputs.iter().map(|input| self.infer(input)).collect()
361+
}
362+
}
336363
}
337364

338365
/// Get inference statistics

rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/src/tensor.rs

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -304,6 +304,74 @@ impl Tensor {
304304
}
305305
}
306306

307+
/// Stack multiple tensors along a new batch dimension (dim 0).
308+
///
309+
/// All tensors must have the same shape. The result has one extra
310+
/// leading dimension equal to `tensors.len()`.
311+
pub fn stack(tensors: &[Tensor]) -> NnResult<Tensor> {
312+
if tensors.is_empty() {
313+
return Err(NnError::tensor_op("Cannot stack zero tensors"));
314+
}
315+
let first_shape = tensors[0].shape();
316+
for (i, t) in tensors.iter().enumerate().skip(1) {
317+
if t.shape() != first_shape {
318+
return Err(NnError::tensor_op(&format!(
319+
"Shape mismatch at index {i}: expected {first_shape}, got {}",
320+
t.shape()
321+
)));
322+
}
323+
}
324+
let mut all_data: Vec<f32> = Vec::with_capacity(tensors.len() * first_shape.numel());
325+
for t in tensors {
326+
let data = t.to_vec()?;
327+
all_data.extend_from_slice(&data);
328+
}
329+
let mut new_dims = vec![tensors.len()];
330+
new_dims.extend_from_slice(first_shape.dims());
331+
let arr = ndarray::ArrayD::from_shape_vec(
332+
ndarray::IxDyn(&new_dims),
333+
all_data,
334+
)
335+
.map_err(|e| NnError::tensor_op(&format!("Stack reshape failed: {e}")))?;
336+
Ok(Tensor::FloatND(arr))
337+
}
338+
339+
/// Split a tensor along dim 0 into `n` sub-tensors.
340+
///
341+
/// The first dimension must be evenly divisible by `n`.
342+
pub fn split(self, n: usize) -> NnResult<Vec<Tensor>> {
343+
if n == 0 {
344+
return Err(NnError::tensor_op("Cannot split into 0 pieces"));
345+
}
346+
let shape = self.shape();
347+
let batch = shape.dim(0).ok_or_else(|| NnError::tensor_op("Tensor has no dimensions"))?;
348+
if batch % n != 0 {
349+
return Err(NnError::tensor_op(&format!(
350+
"Batch dim {batch} not divisible by {n}"
351+
)));
352+
}
353+
let chunk_size = batch / n;
354+
let data = self.to_vec()?;
355+
let elem_per_sample = shape.numel() / batch;
356+
let sub_dims: Vec<usize> = {
357+
let mut d = shape.dims().to_vec();
358+
d[0] = chunk_size;
359+
d
360+
};
361+
let mut result = Vec::with_capacity(n);
362+
for i in 0..n {
363+
let start = i * chunk_size * elem_per_sample;
364+
let end = start + chunk_size * elem_per_sample;
365+
let arr = ndarray::ArrayD::from_shape_vec(
366+
ndarray::IxDyn(&sub_dims),
367+
data[start..end].to_vec(),
368+
)
369+
.map_err(|e| NnError::tensor_op(&format!("Split reshape failed: {e}")))?;
370+
result.push(Tensor::FloatND(arr));
371+
}
372+
Ok(result)
373+
}
374+
307375
/// Compute standard deviation
308376
pub fn std(&self) -> NnResult<f32> {
309377
match self {
Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
//! CLI argument definitions and early-exit mode handlers.
2+
3+
use std::path::PathBuf;
4+
use clap::Parser;
5+
6+
/// CLI arguments for the sensing server.
7+
#[derive(Parser, Debug)]
8+
#[command(name = "sensing-server", about = "WiFi-DensePose sensing server")]
9+
pub struct Args {
10+
/// HTTP port for UI and REST API
11+
#[arg(long, default_value = "8080")]
12+
pub http_port: u16,
13+
14+
/// WebSocket port for sensing stream
15+
#[arg(long, default_value = "8765")]
16+
pub ws_port: u16,
17+
18+
/// UDP port for ESP32 CSI frames
19+
#[arg(long, default_value = "5005")]
20+
pub udp_port: u16,
21+
22+
/// Path to UI static files
23+
#[arg(long, default_value = "../../ui")]
24+
pub ui_path: PathBuf,
25+
26+
/// Tick interval in milliseconds (default 100 ms = 10 fps for smooth pose animation)
27+
#[arg(long, default_value = "100")]
28+
pub tick_ms: u64,
29+
30+
/// Bind address (default 127.0.0.1; set to 0.0.0.0 for network access)
31+
#[arg(long, default_value = "127.0.0.1", env = "SENSING_BIND_ADDR")]
32+
pub bind_addr: String,
33+
34+
/// Data source: auto, wifi, esp32, simulate
35+
#[arg(long, default_value = "auto")]
36+
pub source: String,
37+
38+
/// Run vital sign detection benchmark (1000 frames) and exit
39+
#[arg(long)]
40+
pub benchmark: bool,
41+
42+
/// Load model config from an RVF container at startup
43+
#[arg(long, value_name = "PATH")]
44+
pub load_rvf: Option<PathBuf>,
45+
46+
/// Save current model state as an RVF container on shutdown
47+
#[arg(long, value_name = "PATH")]
48+
pub save_rvf: Option<PathBuf>,
49+
50+
/// Load a trained .rvf model for inference
51+
#[arg(long, value_name = "PATH")]
52+
pub model: Option<PathBuf>,
53+
54+
/// Enable progressive loading (Layer A instant start)
55+
#[arg(long)]
56+
pub progressive: bool,
57+
58+
/// Export an RVF container package and exit (no server)
59+
#[arg(long, value_name = "PATH")]
60+
pub export_rvf: Option<PathBuf>,
61+
62+
/// Run training mode (train a model and exit)
63+
#[arg(long)]
64+
pub train: bool,
65+
66+
/// Path to dataset directory (MM-Fi or Wi-Pose)
67+
#[arg(long, value_name = "PATH")]
68+
pub dataset: Option<PathBuf>,
69+
70+
/// Dataset type: "mmfi" or "wipose"
71+
#[arg(long, value_name = "TYPE", default_value = "mmfi")]
72+
pub dataset_type: String,
73+
74+
/// Number of training epochs
75+
#[arg(long, default_value = "100")]
76+
pub epochs: usize,
77+
78+
/// Directory for training checkpoints
79+
#[arg(long, value_name = "DIR")]
80+
pub checkpoint_dir: Option<PathBuf>,
81+
82+
/// Run self-supervised contrastive pretraining (ADR-024)
83+
#[arg(long)]
84+
pub pretrain: bool,
85+
86+
/// Number of pretraining epochs (default 50)
87+
#[arg(long, default_value = "50")]
88+
pub pretrain_epochs: usize,
89+
90+
/// Extract embeddings mode: load model and extract CSI embeddings
91+
#[arg(long)]
92+
pub embed: bool,
93+
94+
/// Build fingerprint index from embeddings (env|activity|temporal|person)
95+
#[arg(long, value_name = "TYPE")]
96+
pub build_index: Option<String>,
97+
98+
/// Node positions for multistatic fusion (format: "x,y,z;x,y,z;...")
99+
#[arg(long, env = "SENSING_NODE_POSITIONS")]
100+
pub node_positions: Option<String>,
101+
102+
/// Start field model calibration on boot (empty room required)
103+
#[arg(long)]
104+
pub calibrate: bool,
105+
}

0 commit comments

Comments
 (0)