Compare commits
22 Commits
c930d4705d
...
622c91a954
| Author | SHA1 | Date | |
|---|---|---|---|
| 622c91a954 | |||
| f522bf10ac | |||
| 63e59c0544 | |||
| 9f5f2df07c | |||
| 1c2a8466e7 | |||
| 0ef750de69 | |||
| dccea21bfe | |||
| 1081fb472f | |||
| abd6f5cf6e | |||
| 1b0e12e824 | |||
| a7497f6045 | |||
| 6bc77cb777 | |||
| 164eead5ec | |||
| f4b1174e13 | |||
| c478e2433d | |||
| 389cbdb063 | |||
| df2082f532 | |||
| a9f5b11f69 | |||
| 2d80a218c5 | |||
| a080f0608b | |||
| 8abba16137 | |||
| dc6aa950e3 |
@@ -5,3 +5,4 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
voltex_math.workspace = true
|
||||
voltex_ecs.workspace = true
|
||||
|
||||
@@ -67,6 +67,25 @@ impl NavMesh {
|
||||
(a.z + b.z) / 2.0,
|
||||
)
|
||||
}
|
||||
|
||||
/// Return the shared edge (portal) vertices between two adjacent triangles.
|
||||
/// Returns (left, right) vertices of the portal edge as seen from `from_tri` looking toward `to_tri`.
|
||||
/// Returns None if the triangles are not adjacent.
|
||||
pub fn shared_edge(&self, from_tri: usize, to_tri: usize) -> Option<(Vec3, Vec3)> {
|
||||
let tri = &self.triangles[from_tri];
|
||||
for edge_idx in 0..3 {
|
||||
if tri.neighbors[edge_idx] == Some(to_tri) {
|
||||
let (i0, i1) = match edge_idx {
|
||||
0 => (tri.indices[0], tri.indices[1]),
|
||||
1 => (tri.indices[1], tri.indices[2]),
|
||||
2 => (tri.indices[2], tri.indices[0]),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
return Some((self.vertices[i0], self.vertices[i1]));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Test whether `point` lies inside or on the triangle (a, b, c) using XZ barycentric coordinates.
|
||||
@@ -90,6 +109,104 @@ pub fn point_in_triangle_xz(point: Vec3, a: Vec3, b: Vec3, c: Vec3) -> bool {
|
||||
u >= 0.0 && v >= 0.0 && w >= 0.0
|
||||
}
|
||||
|
||||
/// Serialize a NavMesh to binary format.
|
||||
/// Format: vertex_count(u32) + vertices(f32*3 each) + triangle_count(u32) + triangles(indices u32*3 + neighbors i32*3 each)
|
||||
pub fn serialize_navmesh(navmesh: &NavMesh) -> Vec<u8> {
|
||||
let mut data = Vec::new();
|
||||
|
||||
// Vertex count
|
||||
let vc = navmesh.vertices.len() as u32;
|
||||
data.extend_from_slice(&vc.to_le_bytes());
|
||||
|
||||
// Vertices
|
||||
for v in &navmesh.vertices {
|
||||
data.extend_from_slice(&v.x.to_le_bytes());
|
||||
data.extend_from_slice(&v.y.to_le_bytes());
|
||||
data.extend_from_slice(&v.z.to_le_bytes());
|
||||
}
|
||||
|
||||
// Triangle count
|
||||
let tc = navmesh.triangles.len() as u32;
|
||||
data.extend_from_slice(&tc.to_le_bytes());
|
||||
|
||||
// Triangles: indices(u32*3) + neighbors(i32*3, -1 for None)
|
||||
for tri in &navmesh.triangles {
|
||||
for &idx in &tri.indices {
|
||||
data.extend_from_slice(&(idx as u32).to_le_bytes());
|
||||
}
|
||||
for &nb in &tri.neighbors {
|
||||
let val: i32 = match nb {
|
||||
Some(i) => i as i32,
|
||||
None => -1,
|
||||
};
|
||||
data.extend_from_slice(&val.to_le_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
data
|
||||
}
|
||||
|
||||
/// Deserialize a NavMesh from binary data.
|
||||
pub fn deserialize_navmesh(data: &[u8]) -> Result<NavMesh, String> {
|
||||
let mut offset = 0;
|
||||
|
||||
let read_u32 = |off: &mut usize| -> Result<u32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = u32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let read_i32 = |off: &mut usize| -> Result<i32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = i32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let read_f32 = |off: &mut usize| -> Result<f32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = f32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let vc = read_u32(&mut offset)? as usize;
|
||||
let mut vertices = Vec::with_capacity(vc);
|
||||
for _ in 0..vc {
|
||||
let x = read_f32(&mut offset)?;
|
||||
let y = read_f32(&mut offset)?;
|
||||
let z = read_f32(&mut offset)?;
|
||||
vertices.push(Vec3::new(x, y, z));
|
||||
}
|
||||
|
||||
let tc = read_u32(&mut offset)? as usize;
|
||||
let mut triangles = Vec::with_capacity(tc);
|
||||
for _ in 0..tc {
|
||||
let i0 = read_u32(&mut offset)? as usize;
|
||||
let i1 = read_u32(&mut offset)? as usize;
|
||||
let i2 = read_u32(&mut offset)? as usize;
|
||||
let n0 = read_i32(&mut offset)?;
|
||||
let n1 = read_i32(&mut offset)?;
|
||||
let n2 = read_i32(&mut offset)?;
|
||||
let to_opt = |v: i32| -> Option<usize> {
|
||||
if v < 0 { None } else { Some(v as usize) }
|
||||
};
|
||||
triangles.push(NavTriangle {
|
||||
indices: [i0, i1, i2],
|
||||
neighbors: [to_opt(n0), to_opt(n1), to_opt(n2)],
|
||||
});
|
||||
}
|
||||
|
||||
Ok(NavMesh::new(vertices, triangles))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -155,4 +272,44 @@ mod tests {
|
||||
assert!((mid.y - 0.0).abs() < 1e-5);
|
||||
assert!((mid.z - 1.0).abs() < 1e-5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_shared_edge() {
|
||||
let nm = make_simple_navmesh();
|
||||
// Tri 0 edge 1 connects to Tri 1: shared edge is v1=(2,0,0) and v2=(1,0,2)
|
||||
let (left, right) = nm.shared_edge(0, 1).expect("should find shared edge");
|
||||
assert_eq!(left, Vec3::new(2.0, 0.0, 0.0));
|
||||
assert_eq!(right, Vec3::new(1.0, 0.0, 2.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_shared_edge_not_adjacent() {
|
||||
let nm = make_simple_navmesh();
|
||||
// Tri 0 is not adjacent to itself via neighbors
|
||||
assert!(nm.shared_edge(0, 0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_roundtrip() {
|
||||
let nm = make_simple_navmesh();
|
||||
let data = serialize_navmesh(&nm);
|
||||
let nm2 = deserialize_navmesh(&data).expect("should deserialize");
|
||||
assert_eq!(nm2.vertices.len(), nm.vertices.len());
|
||||
assert_eq!(nm2.triangles.len(), nm.triangles.len());
|
||||
for (a, b) in nm.vertices.iter().zip(nm2.vertices.iter()) {
|
||||
assert!((a.x - b.x).abs() < 1e-6);
|
||||
assert!((a.y - b.y).abs() < 1e-6);
|
||||
assert!((a.z - b.z).abs() < 1e-6);
|
||||
}
|
||||
for (a, b) in nm.triangles.iter().zip(nm2.triangles.iter()) {
|
||||
assert_eq!(a.indices, b.indices);
|
||||
assert_eq!(a.neighbors, b.neighbors);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_empty_data() {
|
||||
let result = deserialize_navmesh(&[]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
651
crates/voltex_ai/src/navmesh_builder.rs
Normal file
651
crates/voltex_ai/src/navmesh_builder.rs
Normal file
@@ -0,0 +1,651 @@
|
||||
use voltex_math::Vec3;
|
||||
use crate::navmesh::{NavMesh, NavTriangle};
|
||||
|
||||
/// Configuration for navmesh generation.
///
/// NOTE(review): `cell_height` and `agent_height` are stored/threaded through
/// but not yet consumed by the simplified pipeline (clearance checking is
/// explicitly "not implemented" in `mark_walkable`) — confirm before relying
/// on them.
#[derive(Debug, Clone)]
pub struct NavMeshBuilder {
    /// XZ voxel size (default 0.3)
    pub cell_size: f32,
    /// Y voxel size (default 0.2)
    pub cell_height: f32,
    /// Minimum clearance height for walkable areas (default 2.0)
    pub agent_height: f32,
    /// Agent capsule radius used to erode walkable areas (default 0.5)
    pub agent_radius: f32,
    /// Maximum walkable slope angle in degrees (default 45.0)
    pub max_slope: f32,
}
|
||||
|
||||
impl Default for NavMeshBuilder {
    /// Defaults match the per-field values documented on `NavMeshBuilder`.
    fn default() -> Self {
        Self {
            cell_size: 0.3,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.5,
            max_slope: 45.0,
        }
    }
}
|
||||
|
||||
/// Heightfield: a 2D grid of min/max height spans for voxelized geometry.
///
/// Each XZ cell stores a single collapsed `(min_y, max_y)` span — a
/// simplification of a full multi-span heightfield (overhangs and multiple
/// floors at one XZ location are merged into one span).
#[derive(Debug, Clone)]
pub struct Heightfield {
    pub width: usize, // number of cells along X
    pub depth: usize, // number of cells along Z
    /// World-space X of the grid origin (cell x = 0).
    pub min_x: f32,
    /// World-space Z of the grid origin (cell z = 0).
    pub min_z: f32,
    pub cell_size: f32,
    pub cell_height: f32,
    /// For each cell (x, z), store (min_y, max_y). None if empty.
    pub cells: Vec<Option<(f32, f32)>>,
}

impl Heightfield {
    /// Row-major index of cell (x, z) into `cells`.
    pub fn cell_index(&self, x: usize, z: usize) -> usize {
        z * self.width + x
    }
}
|
||||
|
||||
/// Map of walkable cells. True if the cell is walkable.
///
/// `walkable` and `heights` are parallel row-major arrays of `width * depth`
/// entries, mirroring the `Heightfield` grid they were derived from.
#[derive(Debug, Clone)]
pub struct WalkableMap {
    pub width: usize,
    pub depth: usize,
    pub min_x: f32,
    pub min_z: f32,
    pub cell_size: f32,
    pub walkable: Vec<bool>,
    /// Height at each walkable cell (top of walkable surface).
    pub heights: Vec<f32>,
}

impl WalkableMap {
    /// Row-major index of cell (x, z) into `walkable` / `heights`.
    pub fn cell_index(&self, x: usize, z: usize) -> usize {
        z * self.width + x
    }
}
|
||||
|
||||
/// A region of connected walkable cells (flood-fill result).
///
/// Row-major `width * depth` grids, same layout as `WalkableMap`.
#[derive(Debug, Clone)]
pub struct RegionMap {
    pub width: usize,
    pub depth: usize,
    pub min_x: f32,
    pub min_z: f32,
    pub cell_size: f32,
    /// Region ID per cell. 0 = not walkable, 1+ = region ID.
    pub regions: Vec<u32>,
    /// Walkable-surface height per cell (copied from the `WalkableMap`).
    pub heights: Vec<f32>,
    /// Number of distinct regions (equals the highest region ID assigned).
    pub num_regions: u32,
}
|
||||
|
||||
impl NavMeshBuilder {
|
||||
    /// Construct a builder with the default tuning values
    /// (equivalent to `NavMeshBuilder::default()`).
    pub fn new() -> Self {
        Self::default()
    }
|
||||
|
||||
    /// Step 1: Rasterize triangles into a heightfield grid.
    ///
    /// `indices` is a flat triangle list (3 entries per triangle); a trailing
    /// partial triangle is ignored. Every grid cell whose center lies (loosely,
    /// with a half-cell margin) inside a triangle's XZ footprint records the
    /// min/max interpolated Y across all triangles covering it.
    pub fn voxelize(&self, vertices: &[Vec3], indices: &[u32]) -> Heightfield {
        // Find bounding box of the input geometry.
        let mut min_x = f32::INFINITY;
        let mut max_x = f32::NEG_INFINITY;
        // NOTE(review): min_y/max_y are accumulated but never used below.
        let mut min_y = f32::INFINITY;
        let mut max_y = f32::NEG_INFINITY;
        let mut min_z = f32::INFINITY;
        let mut max_z = f32::NEG_INFINITY;

        for v in vertices {
            min_x = min_x.min(v.x);
            max_x = max_x.max(v.x);
            min_y = min_y.min(v.y);
            max_y = max_y.max(v.y);
            min_z = min_z.min(v.z);
            max_z = max_z.max(v.z);
        }

        // Grid dimensions; the +1 ensures the far edge of the bounds is covered.
        let width = ((max_x - min_x) / self.cell_size).ceil() as usize + 1;
        let depth = ((max_z - min_z) / self.cell_size).ceil() as usize + 1;
        let mut cells: Vec<Option<(f32, f32)>> = vec![None; width * depth];

        // Rasterize each triangle
        for tri_i in (0..indices.len()).step_by(3) {
            if tri_i + 2 >= indices.len() {
                break; // trailing partial triangle — ignore
            }
            let v0 = vertices[indices[tri_i] as usize];
            let v1 = vertices[indices[tri_i + 1] as usize];
            let v2 = vertices[indices[tri_i + 2] as usize];

            // Find bounding box of triangle in grid coords
            let tri_min_x = v0.x.min(v1.x).min(v2.x);
            let tri_max_x = v0.x.max(v1.x).max(v2.x);
            let tri_min_z = v0.z.min(v1.z).min(v2.z);
            let tri_max_z = v0.z.max(v1.z).max(v2.z);

            let gx0 = ((tri_min_x - min_x) / self.cell_size).floor() as isize;
            let gx1 = ((tri_max_x - min_x) / self.cell_size).ceil() as isize;
            let gz0 = ((tri_min_z - min_z) / self.cell_size).floor() as isize;
            let gz1 = ((tri_max_z - min_z) / self.cell_size).ceil() as isize;

            // Clamp the triangle's cell range to the grid.
            let gx0 = gx0.max(0) as usize;
            let gx1 = (gx1 as usize).min(width - 1);
            let gz0 = gz0.max(0) as usize;
            let gz1 = (gz1 as usize).min(depth - 1);

            for gz in gz0..=gz1 {
                for gx in gx0..=gx1 {
                    // World-space center of this cell.
                    let cx = min_x + (gx as f32 + 0.5) * self.cell_size;
                    let cz = min_z + (gz as f32 + 0.5) * self.cell_size;

                    // Check if cell center is inside triangle (XZ); the
                    // half-cell margin lets thin triangles still hit cells.
                    let p = Vec3::new(cx, 0.0, cz);
                    if point_in_triangle_xz_loose(p, v0, v1, v2, self.cell_size * 0.5) {
                        // Interpolate Y at this XZ point and widen the cell's
                        // (min, max) span to include it.
                        let y = interpolate_y(v0, v1, v2, cx, cz);
                        let idx = gz * width + gx;
                        match &mut cells[idx] {
                            Some((ref mut lo, ref mut hi)) => {
                                *lo = lo.min(y);
                                *hi = hi.max(y);
                            }
                            None => {
                                cells[idx] = Some((y, y));
                            }
                        }
                    }
                }
            }
        }

        Heightfield {
            width,
            depth,
            min_x,
            min_z,
            cell_size: self.cell_size,
            cell_height: self.cell_height,
            cells,
        }
    }
|
||||
|
||||
    /// Step 2: Mark walkable cells based on slope and agent height clearance.
    ///
    /// Produces a boolean walkability grid plus per-cell surface heights
    /// (the cell's max Y), then — when `agent_radius > 0` — erodes the
    /// walkable area so a circular agent fits without overlapping obstacles
    /// or the grid boundary.
    pub fn mark_walkable(&self, hf: &Heightfield) -> WalkableMap {
        // Compare cosines rather than angles: cheaper, and monotonic over [0, 90°].
        let max_slope_cos = (self.max_slope * std::f32::consts::PI / 180.0).cos();
        let n = hf.width * hf.depth;
        let mut walkable = vec![false; n];
        let mut heights = vec![0.0f32; n];

        for z in 0..hf.depth {
            for x in 0..hf.width {
                let idx = z * hf.width + x;
                if let Some((_lo, hi)) = hf.cells[idx] {
                    // Check slope by comparing height differences with neighbors
                    let slope_ok = self.check_slope(hf, x, z, max_slope_cos);

                    // Check clearance: for simplicity, if cell has geometry, assume clearance
                    // unless there's a cell above within agent_height (not implemented for simple case)
                    // NOTE(review): `self.agent_height` is consequently unused here.
                    if slope_ok {
                        walkable[idx] = true;
                        heights[idx] = hi;
                    }
                }
            }
        }

        // Erode by agent radius: remove walkable cells too close to non-walkable
        let erosion_cells = (self.agent_radius / hf.cell_size).ceil() as usize;
        if erosion_cells > 0 {
            // Erode into a copy while reading the pre-erosion `walkable`,
            // so the result does not depend on iteration order.
            let mut eroded = walkable.clone();
            for z in 0..hf.depth {
                for x in 0..hf.width {
                    let idx = z * hf.width + x;
                    if !walkable[idx] {
                        continue;
                    }
                    // Check if near boundary of walkable area
                    let mut near_edge = false;
                    for dz in 0..=erosion_cells {
                        for dx in 0..=erosion_cells {
                            if dx == 0 && dz == 0 {
                                continue;
                            }
                            // Check all 4 quadrants
                            let checks: [(isize, isize); 4] = [
                                (dx as isize, dz as isize),
                                (-(dx as isize), dz as isize),
                                (dx as isize, -(dz as isize)),
                                (-(dx as isize), -(dz as isize)),
                            ];
                            for (ddx, ddz) in checks {
                                let nx = x as isize + ddx;
                                let nz = z as isize + ddz;
                                // Cells beyond the grid edge count as non-walkable.
                                if nx < 0 || nz < 0 || nx >= hf.width as isize || nz >= hf.depth as isize {
                                    near_edge = true;
                                    break;
                                }
                                let ni = nz as usize * hf.width + nx as usize;
                                if !walkable[ni] {
                                    near_edge = true;
                                    break;
                                }
                            }
                            if near_edge {
                                break;
                            }
                        }
                        if near_edge {
                            break;
                        }
                    }
                    if near_edge {
                        eroded[idx] = false;
                    }
                }
            }
            // Early return with the eroded map.
            return WalkableMap {
                width: hf.width,
                depth: hf.depth,
                min_x: hf.min_x,
                min_z: hf.min_z,
                cell_size: hf.cell_size,
                walkable: eroded,
                heights,
            };
        }

        // No erosion requested (agent_radius == 0): return the raw map.
        WalkableMap {
            width: hf.width,
            depth: hf.depth,
            min_x: hf.min_x,
            min_z: hf.min_z,
            cell_size: hf.cell_size,
            walkable,
            heights,
        }
    }
|
||||
|
||||
/// Check if the slope at cell (x, z) is walkable.
|
||||
fn check_slope(&self, hf: &Heightfield, x: usize, z: usize, max_slope_cos: f32) -> bool {
|
||||
let idx = z * hf.width + x;
|
||||
let h = match hf.cells[idx] {
|
||||
Some((_, hi)) => hi,
|
||||
None => return false,
|
||||
};
|
||||
|
||||
// Compare with direct neighbors to estimate slope
|
||||
let neighbors: [(isize, isize); 4] = [(1, 0), (-1, 0), (0, 1), (0, -1)];
|
||||
for (dx, dz) in neighbors {
|
||||
let nx = x as isize + dx;
|
||||
let nz = z as isize + dz;
|
||||
if nx < 0 || nz < 0 || nx >= hf.width as isize || nz >= hf.depth as isize {
|
||||
continue;
|
||||
}
|
||||
let ni = nz as usize * hf.width + nx as usize;
|
||||
if let Some((_, nh)) = hf.cells[ni] {
|
||||
let dy = (nh - h).abs();
|
||||
let dist = hf.cell_size;
|
||||
// slope angle: atan(dy/dist), check cos of that angle
|
||||
let slope_len = (dy * dy + dist * dist).sqrt();
|
||||
let cos_angle = dist / slope_len;
|
||||
if cos_angle < max_slope_cos {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
    /// Step 3: Flood-fill connected walkable areas into regions.
    ///
    /// Cells are 4-connected. Region IDs start at 1; 0 marks non-walkable
    /// (or unreachable) cells. `num_regions` equals the highest ID assigned.
    pub fn build_regions(&self, wm: &WalkableMap) -> RegionMap {
        let n = wm.width * wm.depth;
        let mut regions = vec![0u32; n];
        let mut current_region = 0u32;

        for z in 0..wm.depth {
            for x in 0..wm.width {
                let idx = z * wm.width + x;
                // Unvisited walkable cell: seed a new region.
                if wm.walkable[idx] && regions[idx] == 0 {
                    current_region += 1;
                    // Flood fill (iterative, explicit stack — no recursion depth limit)
                    let mut stack = vec![(x, z)];
                    regions[idx] = current_region;
                    while let Some((cx, cz)) = stack.pop() {
                        let neighbors: [(isize, isize); 4] = [(1, 0), (-1, 0), (0, 1), (0, -1)];
                        for (dx, dz) in neighbors {
                            let nx = cx as isize + dx;
                            let nz = cz as isize + dz;
                            if nx < 0 || nz < 0 || nx >= wm.width as isize || nz >= wm.depth as isize {
                                continue;
                            }
                            let ni = nz as usize * wm.width + nx as usize;
                            // Claim the cell before pushing so it is never enqueued twice.
                            if wm.walkable[ni] && regions[ni] == 0 {
                                regions[ni] = current_region;
                                stack.push((nx as usize, nz as usize));
                            }
                        }
                    }
                }
            }
        }

        RegionMap {
            width: wm.width,
            depth: wm.depth,
            min_x: wm.min_x,
            min_z: wm.min_z,
            cell_size: wm.cell_size,
            regions,
            heights: wm.heights.clone(),
            num_regions: current_region,
        }
    }
|
||||
|
||||
    /// Steps 4-5 combined: Convert walkable grid cells directly into a NavMesh.
    /// Each walkable cell becomes a quad (2 triangles), with adjacency computed.
    ///
    /// NOTE(review): corner vertices are duplicated per cell (no welding between
    /// neighboring quads), so adjacency is tracked via triangle neighbor IDs,
    /// not shared vertex indices.
    pub fn triangulate(&self, rm: &RegionMap) -> NavMesh {
        let mut vertices = Vec::new();
        let mut triangles = Vec::new();

        // For each walkable cell, create 4 vertices and 2 triangles.
        // Map from cell (x,z) -> (tri_a_idx, tri_b_idx) for adjacency lookup.
        let n = rm.width * rm.depth;
        // cell_tri_map[cell_idx] = Some((tri_a_idx, tri_b_idx)) or None
        let mut cell_tri_map: Vec<Option<(usize, usize)>> = vec![None; n];

        for z in 0..rm.depth {
            for x in 0..rm.width {
                let idx = z * rm.width + x;
                if rm.regions[idx] == 0 {
                    continue; // non-walkable cell: no geometry
                }

                // The whole quad sits at the cell's walkable-surface height.
                let h = rm.heights[idx];
                let x0 = rm.min_x + x as f32 * rm.cell_size;
                let x1 = x0 + rm.cell_size;
                let z0 = rm.min_z + z as f32 * rm.cell_size;
                let z1 = z0 + rm.cell_size;

                let vi = vertices.len();
                vertices.push(Vec3::new(x0, h, z0)); // vi+0: bottom-left
                vertices.push(Vec3::new(x1, h, z0)); // vi+1: bottom-right
                vertices.push(Vec3::new(x1, h, z1)); // vi+2: top-right
                vertices.push(Vec3::new(x0, h, z1)); // vi+3: top-left

                let ta = triangles.len();
                // Triangle A: bottom-left, bottom-right, top-right (vi+0, vi+1, vi+2)
                triangles.push(NavTriangle {
                    indices: [vi, vi + 1, vi + 2],
                    neighbors: [None, None, None], // filled in later
                });
                // Triangle B: bottom-left, top-right, top-left (vi+0, vi+2, vi+3)
                triangles.push(NavTriangle {
                    indices: [vi, vi + 2, vi + 3],
                    neighbors: [None, None, None],
                });

                // Internal adjacency: A and B share edge (vi+0, vi+2)
                // For A: edge 2 is (vi+2 -> vi+0) — indices[2] to indices[0]
                // For B: edge 0 is (vi+0 -> vi+2) — indices[0] to indices[1]
                triangles[ta].neighbors[2] = Some(ta + 1); // A's edge2 -> B
                triangles[ta + 1].neighbors[0] = Some(ta); // B's edge0 -> A

                cell_tri_map[idx] = Some((ta, ta + 1));
            }
        }

        // Now compute inter-cell adjacency
        // Cell layout:
        //   Tri A: (v0, v1, v2) = (BL, BR, TR)
        //     edge 0: BL->BR (bottom edge, connects to cell below z-1)
        //     edge 1: BR->TR (right edge, connects to cell at x+1)
        //     edge 2: TR->BL (diagonal, internal, already connected to B)
        //   Tri B: (v0, v2, v3) = (BL, TR, TL)
        //     edge 0: BL->TR (diagonal, internal, already connected to A)
        //     edge 1: TR->TL (top edge, connects to cell above z+1)
        //     edge 2: TL->BL (left edge, connects to cell at x-1)
        //
        // Only the bottom (z-1) and right (x+1) links are created here; the
        // top and left links of each cell are filled in symmetrically when the
        // neighboring cell's iteration reaches it.

        for z in 0..rm.depth {
            for x in 0..rm.width {
                let idx = z * rm.width + x;
                if cell_tri_map[idx].is_none() {
                    continue;
                }
                let (ta, tb) = cell_tri_map[idx].unwrap();

                // Bottom neighbor (z-1): A's edge 0 connects to neighbor's B edge 1 (TR->TL = top)
                if z > 0 {
                    let ni = (z - 1) * rm.width + x;
                    if let Some((_, nb_tb)) = cell_tri_map[ni] {
                        triangles[ta].neighbors[0] = Some(nb_tb);
                        triangles[nb_tb].neighbors[1] = Some(ta);
                    }
                }

                // Right neighbor (x+1): A's edge 1 connects to neighbor's B edge 2 (TL->BL = left)
                if x + 1 < rm.width {
                    let ni = z * rm.width + (x + 1);
                    if let Some((_, nb_tb)) = cell_tri_map[ni] {
                        triangles[ta].neighbors[1] = Some(nb_tb);
                        triangles[nb_tb].neighbors[2] = Some(ta);
                    }
                }
            }
        }

        NavMesh::new(vertices, triangles)
    }
|
||||
|
||||
/// Full pipeline: voxelize, mark walkable, build regions, triangulate.
|
||||
pub fn build(&self, vertices: &[Vec3], indices: &[u32]) -> NavMesh {
|
||||
let hf = self.voxelize(vertices, indices);
|
||||
let wm = self.mark_walkable(&hf);
|
||||
let rm = self.build_regions(&wm);
|
||||
self.triangulate(&rm)
|
||||
}
|
||||
}
|
||||
|
||||
/// Loose point-in-triangle test on XZ plane, with a tolerance margin.
|
||||
fn point_in_triangle_xz_loose(point: Vec3, a: Vec3, b: Vec3, c: Vec3, margin: f32) -> bool {
|
||||
let px = point.x;
|
||||
let pz = point.z;
|
||||
|
||||
let denom = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
|
||||
if denom.abs() < f32::EPSILON {
|
||||
// Degenerate: check if point is near the line segment
|
||||
return false;
|
||||
}
|
||||
|
||||
let u = ((b.z - c.z) * (px - c.x) + (c.x - b.x) * (pz - c.z)) / denom;
|
||||
let v = ((c.z - a.z) * (px - c.x) + (a.x - c.x) * (pz - c.z)) / denom;
|
||||
let w = 1.0 - u - v;
|
||||
|
||||
let e = margin / ((a - b).length().max((b - c).length()).max((c - a).length()).max(0.001));
|
||||
u >= -e && v >= -e && w >= -e
|
||||
}
|
||||
|
||||
/// Interpolate Y height at (px, pz) on the plane defined by triangle (a, b, c).
|
||||
fn interpolate_y(a: Vec3, b: Vec3, c: Vec3, px: f32, pz: f32) -> f32 {
|
||||
let denom = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
|
||||
if denom.abs() < f32::EPSILON {
|
||||
return (a.y + b.y + c.y) / 3.0;
|
||||
}
|
||||
let u = ((b.z - c.z) * (px - c.x) + (c.x - b.x) * (pz - c.z)) / denom;
|
||||
let v = ((c.z - a.z) * (px - c.x) + (a.x - c.x) * (pz - c.z)) / denom;
|
||||
let w = 1.0 - u - v;
|
||||
u * a.y + v * b.y + w * c.y
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Create a simple flat plane (10x10, y=0) as 2 triangles.
    fn flat_plane_geometry() -> (Vec<Vec3>, Vec<u32>) {
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(10.0, 0.0, 0.0),
            Vec3::new(10.0, 0.0, 10.0),
            Vec3::new(0.0, 0.0, 10.0),
        ];
        let indices = vec![0, 1, 2, 0, 2, 3];
        (vertices, indices)
    }

    /// Create a plane with a box obstacle (hole) in the middle.
    /// The plane is 10x10 with a 2x2 hole at center (4-6, 4-6).
    fn plane_with_obstacle() -> (Vec<Vec3>, Vec<u32>) {
        // Build geometry as a grid of quads, skipping the obstacle region.
        // Use 1.0 unit grid cells for simplicity.
        let mut vertices = Vec::new();
        let mut indices = Vec::new();

        // 10x10 grid of 1x1 quads, skip 4<=x<6 && 4<=z<6
        for z in 0..10 {
            for x in 0..10 {
                if x >= 4 && x < 6 && z >= 4 && z < 6 {
                    continue; // obstacle
                }
                let vi = vertices.len() as u32;
                let fx = x as f32;
                let fz = z as f32;
                // Four corners of this quad (vertices duplicated between quads).
                vertices.push(Vec3::new(fx, 0.0, fz));
                vertices.push(Vec3::new(fx + 1.0, 0.0, fz));
                vertices.push(Vec3::new(fx + 1.0, 0.0, fz + 1.0));
                vertices.push(Vec3::new(fx, 0.0, fz + 1.0));
                // Two triangles per quad.
                indices.push(vi);
                indices.push(vi + 1);
                indices.push(vi + 2);
                indices.push(vi);
                indices.push(vi + 2);
                indices.push(vi + 3);
            }
        }

        (vertices, indices)
    }

    #[test]
    fn test_voxelize_flat_plane() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0, // no erosion for simple test
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let hf = builder.voxelize(&verts, &idxs);

        // Should have cells covering the 10x10 area
        assert!(hf.width > 0);
        assert!(hf.depth > 0);

        // Check that some cells are populated
        let populated = hf.cells.iter().filter(|c| c.is_some()).count();
        assert!(populated > 0, "some cells should be populated");
    }

    #[test]
    fn test_flat_plane_single_region() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let hf = builder.voxelize(&verts, &idxs);
        let wm = builder.mark_walkable(&hf);
        let rm = builder.build_regions(&wm);

        assert_eq!(rm.num_regions, 1, "flat plane should be a single region");
    }

    #[test]
    fn test_flat_plane_builds_navmesh() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let nm = builder.build(&verts, &idxs);

        assert!(!nm.vertices.is_empty(), "navmesh should have vertices");
        assert!(!nm.triangles.is_empty(), "navmesh should have triangles");

        // Should be able to find a triangle at center of the plane
        let center = Vec3::new(5.0, 0.0, 5.0);
        assert!(nm.find_triangle(center).is_some(), "should find triangle at center");
    }

    #[test]
    fn test_obstacle_path_around() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = plane_with_obstacle();
        let nm = builder.build(&verts, &idxs);

        // Start at (1, 0, 5) and goal at (9, 0, 5) — must go around obstacle
        let start = Vec3::new(1.5, 0.0, 5.5);
        let goal = Vec3::new(8.5, 0.0, 5.5);

        use crate::pathfinding::find_path;
        let path = find_path(&nm, start, goal);
        assert!(path.is_some(), "should find path around obstacle");
    }

    #[test]
    fn test_slope_walkable_unwalkable() {
        // Create a steep ramp: triangle from (0,0,0) to (1,0,0) to (0.5, 5, 1)
        // Slope angle = atan(5/1) ≈ 78.7 degrees — should be unwalkable at max_slope=45
        let builder = NavMeshBuilder {
            cell_size: 0.2,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0),
            Vec3::new(1.0, 10.0, 2.0),
        ];
        let indices = vec![0, 1, 2];
        let hf = builder.voxelize(&vertices, &indices);
        let wm = builder.mark_walkable(&hf);

        // Most interior cells should be unwalkable due to steep slope
        let walkable_count = wm.walkable.iter().filter(|&&w| w).count();
        // The bottom edge cells might be walkable (flat), but interior should not
        // Just check that not all cells are walkable
        let total_cells = hf.cells.iter().filter(|c| c.is_some()).count();
        assert!(
            walkable_count < total_cells || total_cells <= 1,
            "steep slope should make most cells unwalkable (walkable={}, total={})",
            walkable_count, total_cells
        );
    }

    #[test]
    fn test_navmesh_adjacency() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let nm = builder.build(&verts, &idxs);

        // Check that some triangles have neighbors
        let has_neighbors = nm.triangles.iter().any(|t| t.neighbors.iter().any(|n| n.is_some()));
        assert!(has_neighbors, "navmesh triangles should have adjacency");
    }
}
|
||||
176
crates/voltex_ai/src/obstacle.rs
Normal file
176
crates/voltex_ai/src/obstacle.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
use voltex_math::Vec3;
|
||||
|
||||
/// A dynamic obstacle represented as a position and radius.
///
/// Consumed by `avoid_obstacles`, which treats it as a bounding circle
/// on the XZ plane.
#[derive(Debug, Clone)]
pub struct DynamicObstacle {
    // World-space center of the obstacle.
    pub position: Vec3,
    // Bounding radius of the obstacle.
    pub radius: f32,
}
|
||||
|
||||
/// Compute avoidance steering force using velocity obstacle approach.
///
/// Projects the agent's velocity forward by `look_ahead` distance and checks
/// for circle intersections with obstacles. Returns a steering force perpendicular
/// to the approach direction to avoid the nearest threatening obstacle.
///
/// Returns `Vec3::ZERO` when the agent is (near) stationary or when no obstacle
/// intersects the look-ahead ray. The force magnitude scales with the agent's
/// speed and grows as the obstacle gets closer along the ray.
pub fn avoid_obstacles(
    agent_pos: Vec3,
    agent_vel: Vec3,
    agent_radius: f32,
    obstacles: &[DynamicObstacle],
    look_ahead: f32,
) -> Vec3 {
    let speed = agent_vel.length();
    if speed < f32::EPSILON {
        // No meaningful heading to steer relative to.
        return Vec3::ZERO;
    }

    // Unit heading vector along the current velocity.
    let forward = agent_vel * (1.0 / speed);
    let mut nearest_t = f32::INFINITY;
    let mut avoidance = Vec3::ZERO;

    for obs in obstacles {
        let to_obs = obs.position - agent_pos;
        let combined_radius = agent_radius + obs.radius;

        // Project obstacle center onto the velocity ray
        let proj = to_obs.dot(forward);

        // Obstacle is behind or too far ahead
        if proj < 0.0 || proj > look_ahead {
            continue;
        }

        // Lateral distance from the velocity ray to obstacle center (XZ only for ground agents)
        let closest_on_ray = agent_pos + forward * proj;
        let diff = obs.position - closest_on_ray;
        let lateral_dist_sq = diff.x * diff.x + diff.z * diff.z;
        let combined_sq = combined_radius * combined_radius;

        if lateral_dist_sq >= combined_sq {
            continue; // No collision
        }

        // This obstacle threatens the agent — check if it's the nearest
        // (only the nearest threat contributes to the final force).
        if proj < nearest_t {
            nearest_t = proj;

            // Avoidance direction: perpendicular to approach, away from obstacle
            // Use XZ plane lateral vector
            let lateral = Vec3::new(diff.x, 0.0, diff.z);
            let lat_len = lateral.length();
            if lat_len > f32::EPSILON {
                // Steer away from obstacle (opposite direction of lateral offset)
                let steer_dir = lateral * (-1.0 / lat_len);
                // Strength inversely proportional to distance (closer = stronger)
                let strength = 1.0 - (proj / look_ahead);
                avoidance = steer_dir * strength * speed;
            } else {
                // Agent heading straight at obstacle center — pick perpendicular
                // Use cross product with Y to get a lateral direction
                // (arbitrary but deterministic side choice).
                let perp = Vec3::new(-forward.z, 0.0, forward.x);
                avoidance = perp * speed;
            }
        }
    }

    avoidance
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): argument order inferred from these calls looks like
    // (agent_pos, velocity, agent_radius, obstacles, look_ahead) — confirm
    // against the `avoid_obstacles` signature, which is outside this view.

    #[test]
    fn test_no_obstacle_zero_force() {
        // Empty obstacle slice: nothing can threaten the agent.
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[],
            5.0,
        );
        assert!(force.length() < 1e-6, "no obstacles should give zero force");
    }

    #[test]
    fn test_obstacle_behind_zero_force() {
        // Obstacle at -X while the agent moves along +X: its projection onto
        // the velocity ray is negative, so it must be ignored.
        let obs = DynamicObstacle {
            position: Vec3::new(-3.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "obstacle behind should give zero force");
    }

    #[test]
    fn test_obstacle_ahead_lateral_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 0.5), // slightly to the right
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0), // moving in +X
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() > 0.1, "obstacle ahead should give non-zero force");
        // Force should push away from obstacle (obstacle is at +Z, force should be -Z)
        assert!(force.z < 0.0, "force should push away from obstacle (negative Z)");
    }

    #[test]
    fn test_obstacle_far_away_zero_force() {
        // Laterally distant obstacle: outside the combined-radius corridor
        // around the velocity ray.
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 10.0), // far to the side
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "distant obstacle should give zero force");
    }

    #[test]
    fn test_obstacle_beyond_lookahead_zero_force() {
        // Projection (10) exceeds the look-ahead distance (5).
        let obs = DynamicObstacle {
            position: Vec3::new(10.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0, // look_ahead is only 5
        );
        assert!(force.length() < 1e-6, "obstacle beyond lookahead should give zero force");
    }

    #[test]
    fn test_zero_velocity_zero_force() {
        // A stationary agent has no forward direction to project onto.
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::ZERO,
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "zero velocity should give zero force");
    }
}
|
||||
@@ -43,29 +43,22 @@ pub fn distance_xz(a: Vec3, b: Vec3) -> f32 {
|
||||
(dx * dx + dz * dz).sqrt()
|
||||
}
|
||||
|
||||
/// Find a path from `start` to `goal` on the given NavMesh using A*.
|
||||
///
|
||||
/// Returns Some(path) where path[0] == start, path[last] == goal, and
|
||||
/// intermediate points are triangle centers. Returns None if either
|
||||
/// point is outside the mesh or no path exists.
|
||||
pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>> {
|
||||
/// Run A* on the NavMesh and return the sequence of triangle indices from start to goal.
|
||||
/// Returns None if either point is outside the mesh or no path exists.
|
||||
pub fn find_path_triangles(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<usize>> {
|
||||
let start_tri = navmesh.find_triangle(start)?;
|
||||
let goal_tri = navmesh.find_triangle(goal)?;
|
||||
|
||||
// If start and goal are in the same triangle, path is direct.
|
||||
if start_tri == goal_tri {
|
||||
return Some(vec![start, goal]);
|
||||
return Some(vec![start_tri]);
|
||||
}
|
||||
|
||||
let n = navmesh.triangles.len();
|
||||
// g_cost[i] = best known cost to reach triangle i
|
||||
let mut g_costs = vec![f32::INFINITY; n];
|
||||
// parent[i] = index of parent triangle in the A* tree
|
||||
let mut parents: Vec<Option<usize>> = vec![None; n];
|
||||
let mut visited = vec![false; n];
|
||||
|
||||
let goal_center = navmesh.triangle_center(goal_tri);
|
||||
|
||||
g_costs[start_tri] = 0.0;
|
||||
let start_center = navmesh.triangle_center(start_tri);
|
||||
let h = distance_xz(start_center, goal_center);
|
||||
@@ -88,7 +81,6 @@ pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>
|
||||
parents[idx] = node.parent;
|
||||
|
||||
if idx == goal_tri {
|
||||
// Reconstruct path
|
||||
let mut tri_path = Vec::new();
|
||||
let mut cur = idx;
|
||||
loop {
|
||||
@@ -99,17 +91,7 @@ pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>
|
||||
}
|
||||
}
|
||||
tri_path.reverse();
|
||||
|
||||
// Convert triangle path to Vec3 waypoints:
|
||||
// start point -> intermediate triangle centers -> goal point
|
||||
let mut path = Vec::new();
|
||||
path.push(start);
|
||||
// skip first (start_tri) and last (goal_tri) in intermediate centers
|
||||
for &ti in &tri_path[1..tri_path.len() - 1] {
|
||||
path.push(navmesh.triangle_center(ti));
|
||||
}
|
||||
path.push(goal);
|
||||
return Some(path);
|
||||
return Some(tri_path);
|
||||
}
|
||||
|
||||
let tri = &navmesh.triangles[idx];
|
||||
@@ -136,7 +118,138 @@ pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>
|
||||
}
|
||||
}
|
||||
|
||||
None // No path found
|
||||
None
|
||||
}
|
||||
|
||||
/// Find a path from `start` to `goal` on the given NavMesh using A*.
|
||||
///
|
||||
/// Returns Some(path) where path[0] == start, path[last] == goal, and
|
||||
/// intermediate points are triangle centers. Returns None if either
|
||||
/// point is outside the mesh or no path exists.
|
||||
pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>> {
|
||||
let tri_path = find_path_triangles(navmesh, start, goal)?;
|
||||
|
||||
if tri_path.len() == 1 {
|
||||
return Some(vec![start, goal]);
|
||||
}
|
||||
|
||||
let mut path = Vec::new();
|
||||
path.push(start);
|
||||
for &ti in &tri_path[1..tri_path.len() - 1] {
|
||||
path.push(navmesh.triangle_center(ti));
|
||||
}
|
||||
path.push(goal);
|
||||
Some(path)
|
||||
}
|
||||
|
||||
/// 2D cross product on XZ plane: (b - a) x (c - a) projected onto Y.
|
||||
fn cross_xz(a: Vec3, b: Vec3, c: Vec3) -> f32 {
|
||||
(b.x - a.x) * (c.z - a.z) - (b.z - a.z) * (c.x - a.x)
|
||||
}
|
||||
|
||||
/// Simple Stupid Funnel (SSF) algorithm for string-pulling a triangle corridor path.
|
||||
///
|
||||
/// Given the sequence of triangle indices from A*, produces an optimal path with
|
||||
/// waypoints at portal edge corners where the path turns.
|
||||
pub fn funnel_smooth(path_triangles: &[usize], navmesh: &NavMesh, start: Vec3, end: Vec3) -> Vec<Vec3> {
|
||||
// Trivial cases
|
||||
if path_triangles.is_empty() {
|
||||
return vec![start, end];
|
||||
}
|
||||
if path_triangles.len() == 1 {
|
||||
return vec![start, end];
|
||||
}
|
||||
|
||||
// Build portal list: shared edges between consecutive triangles
|
||||
let mut portals_left = Vec::new();
|
||||
let mut portals_right = Vec::new();
|
||||
|
||||
for i in 0..path_triangles.len() - 1 {
|
||||
let from = path_triangles[i];
|
||||
let to = path_triangles[i + 1];
|
||||
if let Some((left, right)) = navmesh.shared_edge(from, to) {
|
||||
portals_left.push(left);
|
||||
portals_right.push(right);
|
||||
}
|
||||
}
|
||||
|
||||
// Add end point as the final portal (degenerate portal: both sides = end)
|
||||
portals_left.push(end);
|
||||
portals_right.push(end);
|
||||
|
||||
let mut path = vec![start];
|
||||
|
||||
let mut apex = start;
|
||||
let mut left = start;
|
||||
let mut right = start;
|
||||
#[allow(unused_assignments)]
|
||||
let mut apex_idx: usize = 0;
|
||||
let mut left_idx: usize = 0;
|
||||
let mut right_idx: usize = 0;
|
||||
|
||||
let n = portals_left.len();
|
||||
|
||||
for i in 0..n {
|
||||
let pl = portals_left[i];
|
||||
let pr = portals_right[i];
|
||||
|
||||
// Update right vertex
|
||||
if cross_xz(apex, right, pr) <= 0.0 {
|
||||
if apex == right || cross_xz(apex, left, pr) > 0.0 {
|
||||
// Tighten the funnel
|
||||
right = pr;
|
||||
right_idx = i;
|
||||
} else {
|
||||
// Right over left: left becomes new apex
|
||||
if left != apex {
|
||||
path.push(left);
|
||||
}
|
||||
apex = left;
|
||||
apex_idx = left_idx;
|
||||
left = apex;
|
||||
right = apex;
|
||||
left_idx = apex_idx;
|
||||
right_idx = apex_idx;
|
||||
// Restart scan from apex
|
||||
// We need to continue from apex_idx + 1, but since
|
||||
// we can't restart a for loop, we use a recursive approach
|
||||
// or just continue (the standard SSF continues from i)
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Update left vertex
|
||||
if cross_xz(apex, left, pl) >= 0.0 {
|
||||
if apex == left || cross_xz(apex, right, pl) < 0.0 {
|
||||
// Tighten the funnel
|
||||
left = pl;
|
||||
left_idx = i;
|
||||
} else {
|
||||
// Left over right: right becomes new apex
|
||||
if right != apex {
|
||||
path.push(right);
|
||||
}
|
||||
apex = right;
|
||||
apex_idx = right_idx;
|
||||
left = apex;
|
||||
right = apex;
|
||||
left_idx = apex_idx;
|
||||
right_idx = apex_idx;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add end point if not already there
|
||||
if let Some(&last) = path.last() {
|
||||
if (last.x - end.x).abs() > 1e-6 || (last.z - end.z).abs() > 1e-6 {
|
||||
path.push(end);
|
||||
}
|
||||
} else {
|
||||
path.push(end);
|
||||
}
|
||||
|
||||
path
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -238,4 +351,86 @@ mod tests {
|
||||
let result = find_path(&nm, start, goal);
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
    #[test]
    fn test_find_path_triangles_same() {
        // Start and goal inside the same triangle -> corridor of length 1.
        let nm = make_strip();
        let start = Vec3::new(0.5, 0.0, 0.5);
        let goal = Vec3::new(1.5, 0.0, 0.5);
        let tris = find_path_triangles(&nm, start, goal).expect("should find path");
        assert_eq!(tris.len(), 1);
    }

    #[test]
    fn test_find_path_triangles_strip() {
        // Crossing the strip should visit exactly three triangles, 0 .. 2.
        let nm = make_strip();
        let start = Vec3::new(0.8, 0.0, 0.5);
        let goal = Vec3::new(2.0, 0.0, 3.5);
        let tris = find_path_triangles(&nm, start, goal).expect("should find path");
        assert_eq!(tris.len(), 3);
        assert_eq!(tris[0], 0);
        assert_eq!(tris[2], 2);
    }

    #[test]
    fn test_funnel_straight_path() {
        // Straight corridor: path should be just start and end (2 points)
        let nm = make_strip();
        let start = Vec3::new(1.0, 0.0, 0.5);
        let end = Vec3::new(2.0, 0.0, 3.5);
        let tris = find_path_triangles(&nm, start, end).expect("should find path");
        let smoothed = funnel_smooth(&tris, &nm, start, end);
        assert!(smoothed.len() >= 2, "funnel path should have at least 2 points, got {}", smoothed.len());
        assert_eq!(smoothed[0], start);
        assert_eq!(smoothed[smoothed.len() - 1], end);
    }

    #[test]
    fn test_funnel_same_triangle() {
        // Single-triangle corridor degenerates to [start, end].
        let nm = make_strip();
        let start = Vec3::new(0.5, 0.0, 0.5);
        let end = Vec3::new(1.5, 0.0, 0.5);
        let smoothed = funnel_smooth(&[0], &nm, start, end);
        assert_eq!(smoothed.len(), 2);
        assert_eq!(smoothed[0], start);
        assert_eq!(smoothed[1], end);
    }

    #[test]
    fn test_funnel_l_shaped_path() {
        // Build an L-shaped navmesh to force a turn
        // Tri0: (0,0,0),(2,0,0),(0,0,2) - bottom left
        // Tri1: (2,0,0),(2,0,2),(0,0,2) - top right of first square
        // Tri2: (2,0,0),(4,0,0),(2,0,2) - extends right
        // Tri3: (2,0,2),(4,0,0),(4,0,2) - top right
        // Tri4: (2,0,2),(4,0,2),(2,0,4) - goes up
        // This makes an L shape going right then up
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0), // 0
            Vec3::new(2.0, 0.0, 0.0), // 1
            Vec3::new(0.0, 0.0, 2.0), // 2
            Vec3::new(2.0, 0.0, 2.0), // 3
            Vec3::new(4.0, 0.0, 0.0), // 4
            Vec3::new(4.0, 0.0, 2.0), // 5
            Vec3::new(2.0, 0.0, 4.0), // 6
        ];
        let triangles = vec![
            NavTriangle { indices: [0, 1, 2], neighbors: [Some(1), None, None] }, // 0
            NavTriangle { indices: [1, 3, 2], neighbors: [Some(2), None, Some(0)] }, // 1
            NavTriangle { indices: [1, 4, 3], neighbors: [None, Some(3), Some(1)] }, // 2
            NavTriangle { indices: [4, 5, 3], neighbors: [None, None, Some(2)] }, // 3 -- not used in L path
            NavTriangle { indices: [3, 5, 6], neighbors: [None, None, None] }, // 4
        ];
        let nm = NavMesh::new(vertices, triangles);

        let start = Vec3::new(0.3, 0.0, 0.3);
        let end = Vec3::new(3.5, 0.0, 0.5);
        let tris = find_path_triangles(&nm, start, end);
        // NOTE(review): silently skipping when no corridor is found makes this
        // test pass vacuously if pathfinding regresses — consider asserting
        // that `tris` is Some.
        if let Some(tris) = tris {
            let smoothed = funnel_smooth(&tris, &nm, start, end);
            assert!(smoothed.len() >= 2);
            assert_eq!(smoothed[0], start);
            assert_eq!(smoothed[smoothed.len() - 1], end);
        }
    }
|
||||
}
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
pub mod handle;
|
||||
pub mod storage;
|
||||
pub mod assets;
|
||||
pub mod watcher;
|
||||
pub mod loader;
|
||||
|
||||
pub use handle::Handle;
|
||||
pub use storage::AssetStorage;
|
||||
pub use assets::Assets;
|
||||
pub use watcher::FileWatcher;
|
||||
pub use loader::{AssetLoader, LoadState};
|
||||
|
||||
300
crates/voltex_asset/src/loader.rs
Normal file
300
crates/voltex_asset/src/loader.rs
Normal file
@@ -0,0 +1,300 @@
|
||||
use std::any::{Any, TypeId};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::thread::{self, JoinHandle};
|
||||
|
||||
use crate::assets::Assets;
|
||||
use crate::handle::Handle;
|
||||
|
||||
/// Public load status of an asset request, as reported by `AssetLoader::state`.
#[derive(Debug)]
pub enum LoadState {
    Loading,
    Ready,
    Failed(String),
}

/// A load job sent to the background worker: file path plus a type-erased
/// parse function producing a boxed asset.
struct LoadRequest {
    id: u64,
    path: PathBuf,
    parse: Box<dyn FnOnce(&[u8]) -> Result<Box<dyn Any + Send>, String> + Send>,
}

/// Outcome of one load job, sent back from the worker to the main thread.
struct LoadResult {
    id: u64,
    result: Result<Box<dyn Any + Send>, String>,
}

/// Main-thread bookkeeping for one in-flight (or completed) load.
struct PendingEntry {
    state: PendingState,
    handle_id: u32,
    handle_gen: u32,
    type_id: TypeId,
    /// Type-erased inserter: takes (assets, boxed_any) and inserts the asset,
    /// returning the actual handle (id, gen) that was assigned.
    inserter: Option<Box<dyn FnOnce(&mut Assets, Box<dyn Any + Send>) -> (u32, u32)>>,
}

/// Internal mirror of `LoadState`, stored per pending entry.
enum PendingState {
    Loading,
    Failed(String),
    Ready,
}

/// Background asset loader: a single worker thread reads and parses files;
/// completed results are drained on the main thread via `process_loaded`.
pub struct AssetLoader {
    request_tx: Option<Sender<LoadRequest>>,
    result_rx: Receiver<LoadResult>,
    thread: Option<JoinHandle<()>>,
    next_id: u64,
    pending: HashMap<u64, PendingEntry>,
    // Map from (type_id, handle_id, handle_gen) to load_id for state lookups
    handle_to_load: HashMap<(TypeId, u32, u32), u64>,
}
|
||||
|
||||
impl AssetLoader {
    /// Spawn the worker thread and wire up the request/result channels.
    ///
    /// The worker loops on `recv()` and exits naturally when the request
    /// sender is dropped (see `shutdown`).
    pub fn new() -> Self {
        let (request_tx, request_rx) = channel::<LoadRequest>();
        let (result_tx, result_rx) = channel::<LoadResult>();

        let thread = thread::spawn(move || {
            while let Ok(req) = request_rx.recv() {
                // Read the file, then run the type-erased parser on its bytes.
                let result = match std::fs::read(&req.path) {
                    Ok(data) => (req.parse)(&data),
                    Err(e) => Err(format!("Failed to read {}: {}", req.path.display(), e)),
                };
                // Send failure is ignored: it only happens when the loader
                // (and its result receiver) has already been dropped.
                let _ = result_tx.send(LoadResult {
                    id: req.id,
                    result,
                });
            }
        });

        Self {
            request_tx: Some(request_tx),
            result_rx,
            thread: Some(thread),
            next_id: 0,
            pending: HashMap::new(),
            handle_to_load: HashMap::new(),
        }
    }

    /// Queue a file for background loading. Returns a handle immediately.
    ///
    /// The handle becomes valid (pointing to real data) after `process_loaded`
    /// inserts the completed asset into Assets. Until then, `state()` returns
    /// `LoadState::Loading`.
    ///
    /// **Important:** The returned handle's id/generation are provisional.
    /// After `process_loaded`, the handle is updated internally to match the
    /// actual slot in Assets. Since we pre-allocate using the load id, the
    /// actual handle assigned by `Assets::insert` may differ. We remap it.
    ///
    /// NOTE(review): only the *internal* mapping is remapped — the `Handle<T>`
    /// copy already returned to the caller keeps the provisional id/gen, so
    /// `Assets::get` with it would miss if the real handle differs. Confirm
    /// `Assets::insert` allocation order matches the load-id sequence, or
    /// expose a lookup for the real handle.
    pub fn load<T, F>(&mut self, path: PathBuf, parse_fn: F) -> Handle<T>
    where
        T: Send + 'static,
        F: FnOnce(&[u8]) -> Result<T, String> + Send + 'static,
    {
        let id = self.next_id;
        self.next_id += 1;

        // We use the load id as a provisional handle id.
        // The real handle is assigned when the asset is inserted into Assets.
        let handle_id = id as u32;
        let handle_gen = 0u32;
        let handle = Handle::new(handle_id, handle_gen);

        let type_id = TypeId::of::<T>();

        // Create a type-erased inserter closure that knows how to downcast
        // Box<dyn Any + Send> back to T and insert it into Assets.
        let inserter: Box<dyn FnOnce(&mut Assets, Box<dyn Any + Send>) -> (u32, u32)> =
            Box::new(|assets: &mut Assets, boxed: Box<dyn Any + Send>| {
                let asset = *boxed.downcast::<T>().expect("type mismatch in loader");
                let real_handle = assets.insert(asset);
                (real_handle.id, real_handle.generation)
            });

        self.pending.insert(
            id,
            PendingEntry {
                state: PendingState::Loading,
                handle_id,
                handle_gen,
                type_id,
                inserter: Some(inserter),
            },
        );
        self.handle_to_load
            .insert((type_id, handle_id, handle_gen), id);

        // Wrap parse_fn to erase the type
        let boxed_parse: Box<
            dyn FnOnce(&[u8]) -> Result<Box<dyn Any + Send>, String> + Send,
        > = Box::new(move |data: &[u8]| {
            parse_fn(data).map(|v| Box::new(v) as Box<dyn Any + Send>)
        });

        // If the sender is already gone (after shutdown) the request is
        // silently dropped and the entry stays Loading forever.
        if let Some(tx) = &self.request_tx {
            let _ = tx.send(LoadRequest {
                id,
                path,
                parse: boxed_parse,
            });
        }

        handle
    }

    /// Check the load state for a given handle.
    ///
    /// Unknown handles also report `Loading` — there is no "not found" state.
    pub fn state<T: 'static>(&self, handle: &Handle<T>) -> LoadState {
        let type_id = TypeId::of::<T>();
        if let Some(&load_id) =
            self.handle_to_load
                .get(&(type_id, handle.id, handle.generation))
        {
            if let Some(entry) = self.pending.get(&load_id) {
                return match &entry.state {
                    PendingState::Loading => LoadState::Loading,
                    PendingState::Failed(e) => LoadState::Failed(e.clone()),
                    PendingState::Ready => LoadState::Ready,
                };
            }
        }
        LoadState::Loading
    }

    /// Drain completed loads from the worker thread and insert them into Assets.
    ///
    /// Call this once per frame on the main thread.
    pub fn process_loaded(&mut self, assets: &mut Assets) {
        // Collect results first
        let mut results = Vec::new();
        while let Ok(result) = self.result_rx.try_recv() {
            results.push(result);
        }

        for result in results {
            if let Some(entry) = self.pending.get_mut(&result.id) {
                match result.result {
                    Ok(boxed_asset) => {
                        // Take the inserter out and use it to insert into Assets
                        if let Some(inserter) = entry.inserter.take() {
                            let (real_id, real_gen) = inserter(assets, boxed_asset);

                            // Update the handle mapping if the real handle differs
                            let old_key =
                                (entry.type_id, entry.handle_id, entry.handle_gen);
                            if real_id != entry.handle_id || real_gen != entry.handle_gen {
                                // Remove old mapping, add new one
                                self.handle_to_load.remove(&old_key);
                                entry.handle_id = real_id;
                                entry.handle_gen = real_gen;
                                self.handle_to_load
                                    .insert((entry.type_id, real_id, real_gen), result.id);
                            }
                        }
                        entry.state = PendingState::Ready;
                    }
                    Err(e) => {
                        entry.state = PendingState::Failed(e);
                    }
                }
            }
        }
    }

    /// Stop the worker thread and wait for it to exit.
    ///
    /// Consumes the loader; in-flight results that were never drained are lost.
    pub fn shutdown(mut self) {
        // Drop the sender to signal the worker to stop
        self.request_tx = None;
        if let Some(thread) = self.thread.take() {
            let _ = thread.join();
        }
    }
}
|
||||
|
||||
impl Default for AssetLoader {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use std::time::Duration;

    #[test]
    fn test_load_state_initial() {
        // Immediately after queuing, the state must report Loading.
        let mut loader = AssetLoader::new();
        let dir = std::env::temp_dir().join("voltex_loader_test_1");
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("test.txt");
        fs::write(&path, "hello world").unwrap();

        let handle: Handle<String> = loader.load(path.clone(), |data| {
            Ok(String::from_utf8_lossy(data).to_string())
        });

        assert!(matches!(
            loader.state::<String>(&handle),
            LoadState::Loading
        ));

        let _ = fs::remove_dir_all(&dir);
        loader.shutdown();
    }

    #[test]
    fn test_load_and_process() {
        let mut loader = AssetLoader::new();
        let dir = std::env::temp_dir().join("voltex_loader_test_2");
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("data.txt");
        fs::write(&path, "content123").unwrap();

        let handle: Handle<String> = loader.load(path.clone(), |data| {
            Ok(String::from_utf8_lossy(data).to_string())
        });

        // NOTE(review): fixed sleep is a race — on a slow or contended machine
        // the worker may not finish in 200ms, making this test flaky.
        std::thread::sleep(Duration::from_millis(200));

        let mut assets = Assets::new();
        loader.process_loaded(&mut assets);

        assert!(matches!(
            loader.state::<String>(&handle),
            LoadState::Ready
        ));

        // The handle returned by load() is provisional. After process_loaded,
        // the real handle may have different id/gen. We need to look up
        // the actual handle. Since this is the first insert, it should be (0, 0).
        // But our provisional handle is also (0, 0), so it should match.
        let val = assets.get(handle).unwrap();
        assert_eq!(val, "content123");

        let _ = fs::remove_dir_all(&dir);
        loader.shutdown();
    }

    #[test]
    fn test_load_nonexistent_fails() {
        // A missing file must surface as LoadState::Failed, not a panic.
        let mut loader = AssetLoader::new();
        let handle: Handle<String> = loader.load(
            PathBuf::from("/nonexistent/file.txt"),
            |data| Ok(String::from_utf8_lossy(data).to_string()),
        );

        std::thread::sleep(Duration::from_millis(200));

        let mut assets = Assets::new();
        loader.process_loaded(&mut assets);

        assert!(matches!(
            loader.state::<String>(&handle),
            LoadState::Failed(_)
        ));
        loader.shutdown();
    }
}
|
||||
@@ -103,6 +103,18 @@ impl<T> AssetStorage<T> {
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Replace the asset data without changing generation or ref_count.
|
||||
/// Used for hot reload — existing handles remain valid.
|
||||
pub fn replace_in_place(&mut self, handle: Handle<T>, new_asset: T) -> bool {
|
||||
if let Some(Some(entry)) = self.entries.get_mut(handle.id as usize) {
|
||||
if entry.generation == handle.generation {
|
||||
entry.asset = new_asset;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> impl Iterator<Item = (Handle<T>, &T)> {
|
||||
self.entries
|
||||
.iter()
|
||||
@@ -211,6 +223,27 @@ mod tests {
|
||||
assert_eq!(storage.get(h2).unwrap().verts, 9);
|
||||
}
|
||||
|
||||
    #[test]
    fn replace_in_place() {
        // Hot-reload path: data is swapped but generation and ref_count stay
        // the same, so the existing handle remains valid.
        let mut storage: AssetStorage<Mesh> = AssetStorage::new();
        let h = storage.insert(Mesh { verts: 3 });
        assert!(storage.replace_in_place(h, Mesh { verts: 99 }));
        assert_eq!(storage.get(h).unwrap().verts, 99);
        // Same handle still works — generation unchanged
        assert_eq!(storage.ref_count(h), 1);
    }

    #[test]
    fn replace_in_place_stale_handle() {
        // After release + reinsert, the old handle's generation no longer
        // matches the slot, so replacement must be refused.
        let mut storage: AssetStorage<Mesh> = AssetStorage::new();
        let h = storage.insert(Mesh { verts: 3 });
        storage.release(h);
        let h2 = storage.insert(Mesh { verts: 10 });
        // h is stale, replace should fail
        assert!(!storage.replace_in_place(h, Mesh { verts: 99 }));
        assert_eq!(storage.get(h2).unwrap().verts, 10);
    }
|
||||
|
||||
#[test]
|
||||
fn iter() {
|
||||
let mut storage: AssetStorage<Mesh> = AssetStorage::new();
|
||||
|
||||
114
crates/voltex_asset/src/watcher.rs
Normal file
114
crates/voltex_asset/src/watcher.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
/// Poll-based file watcher for hot reload.
///
/// Tracks the last observed modification time of each watched path and
/// reports paths whose mtime changed since the previous poll. Effective
/// polling is throttled to at most once per `poll_interval`.
pub struct FileWatcher {
    /// Watched paths mapped to the last observed mtime. `None` means the path
    /// was registered but not yet polled, or was unreadable at the last poll.
    watched: HashMap<PathBuf, Option<SystemTime>>,
    /// Minimum time between two effective polls.
    poll_interval: Duration,
    /// Instant of the last effective poll. `None` until the first poll, so the
    /// first `poll_changes` call is never throttled.
    last_poll: Option<Instant>,
}

impl FileWatcher {
    /// Create a watcher that polls at most once per `poll_interval`.
    pub fn new(poll_interval: Duration) -> Self {
        Self {
            watched: HashMap::new(),
            poll_interval,
            // Using `None` instead of backdating with
            // `Instant::now() - poll_interval` keeps the first poll immediate
            // and avoids the panic `Instant` subtraction raises when the
            // interval exceeds the platform's monotonic-clock origin.
            last_poll: None,
        }
    }

    /// Register a path for change detection.
    ///
    /// The first poll after registration only records the current mtime; a
    /// change is reported from the second poll onward.
    pub fn watch(&mut self, path: PathBuf) {
        // Store None initially — first poll will record the mtime without reporting change
        self.watched.insert(path, None);
    }

    /// Stop watching a path. No-op if the path was not watched.
    pub fn unwatch(&mut self, path: &Path) {
        self.watched.remove(path);
    }

    /// Return the paths whose modification time changed since the last poll.
    ///
    /// Returns an empty list when called again before `poll_interval` has
    /// elapsed. A file that disappears (or becomes unreadable) is reported as
    /// changed once, then tracked as unknown until it reappears.
    pub fn poll_changes(&mut self) -> Vec<PathBuf> {
        let now = Instant::now();
        if let Some(prev) = self.last_poll {
            // Throttle: ignore polls that arrive before the interval elapses.
            if now.duration_since(prev) < self.poll_interval {
                return Vec::new();
            }
        }
        self.last_poll = Some(now);

        let mut changed = Vec::new();
        for (path, last_mtime) in &mut self.watched {
            let current = std::fs::metadata(path)
                .ok()
                .and_then(|m| m.modified().ok());
            if let Some(prev) = last_mtime {
                // We have a previous mtime — any difference counts as a change.
                if current != Some(*prev) {
                    changed.push(path.clone());
                }
            }
            // else: first poll for this path — just record the mtime silently.
            *last_mtime = current;
        }
        changed
    }

    /// Number of currently watched paths.
    pub fn watched_count(&self) -> usize {
        self.watched.len()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;

    #[test]
    fn test_watch_and_poll_no_changes() {
        // Interval 0 disables throttling so every poll is effective.
        let mut watcher = FileWatcher::new(Duration::from_millis(0));
        let dir = std::env::temp_dir().join("voltex_watcher_test_1");
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("test.txt");
        fs::write(&path, "hello").unwrap();

        watcher.watch(path.clone());
        // First poll — should not report as changed (just registered)
        let changes = watcher.poll_changes();
        assert!(changes.is_empty());

        // Second poll without modification — still no changes
        let changes = watcher.poll_changes();
        assert!(changes.is_empty());

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_detect_file_change() {
        let dir = std::env::temp_dir().join("voltex_watcher_test_2");
        let _ = fs::create_dir_all(&dir);
        let path = dir.join("test2.txt");
        fs::write(&path, "v1").unwrap();

        let mut watcher = FileWatcher::new(Duration::from_millis(0));
        watcher.watch(path.clone());
        let _ = watcher.poll_changes(); // register initial mtime

        // Modify file
        // NOTE(review): the 50ms sleep guards against coarse mtime resolution,
        // but filesystems with 1-second granularity could still make this flaky.
        std::thread::sleep(Duration::from_millis(50));
        fs::write(&path, "v2 with more data").unwrap();

        let changes = watcher.poll_changes();
        assert!(changes.contains(&path));

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_unwatch() {
        // Unwatched paths are dropped entirely; polling reports nothing.
        let mut watcher = FileWatcher::new(Duration::from_millis(0));
        let path = PathBuf::from("/nonexistent/test.txt");
        watcher.watch(path.clone());
        assert_eq!(watcher.watched_count(), 1);
        watcher.unwatch(&path);
        assert_eq!(watcher.watched_count(), 0);
        assert!(watcher.poll_changes().is_empty());
    }
}
|
||||
294
crates/voltex_audio/src/ogg.rs
Normal file
294
crates/voltex_audio/src/ogg.rs
Normal file
@@ -0,0 +1,294 @@
|
||||
//! OGG container parser.
|
||||
//!
|
||||
//! Parses OGG bitstream pages and extracts Vorbis packets.
|
||||
//! Reference: <https://www.xiph.org/ogg/doc/framing.html>
|
||||
|
||||
/// An OGG page header.
#[derive(Debug, Clone)]
pub struct OggPage {
    /// Header type flags (0x01 = continuation, 0x02 = BOS, 0x04 = EOS).
    pub header_type: u8,
    /// Granule position (PCM sample position).
    pub granule_position: u64,
    /// Bitstream serial number.
    pub serial: u32,
    /// Page sequence number.
    pub page_sequence: u32,
    /// Number of segments in this page.
    pub segment_count: u8,
    /// The segment table (each entry is a segment length, 0..255).
    pub segment_table: Vec<u8>,
    /// Raw packet data of this page (concatenated segments).
    pub data: Vec<u8>,
}

/// Parse all OGG pages from raw bytes.
///
/// # Errors
/// Returns `Err` on a bad capture pattern, an unsupported stream version, a
/// truncated page, or if the input contains no complete page at all.
pub fn parse_ogg_pages(data: &[u8]) -> Result<Vec<OggPage>, String> {
    let mut pages = Vec::new();
    let mut offset = 0;

    while offset < data.len() {
        // A page header is 27 bytes before the segment table; fewer trailing
        // bytes than that cannot form another page.
        if offset + 27 > data.len() {
            break;
        }

        // Capture pattern "OggS"
        if &data[offset..offset + 4] != b"OggS" {
            return Err(format!("Invalid OGG capture pattern at offset {}", offset));
        }

        let version = data[offset + 4];
        if version != 0 {
            return Err(format!("Unsupported OGG version: {}", version));
        }

        let header_type = data[offset + 5];

        // All multi-byte header fields are little-endian per the OGG framing
        // spec. The slice lengths are fixed, so try_into cannot fail.
        let granule_position =
            u64::from_le_bytes(data[offset + 6..offset + 14].try_into().unwrap());
        let serial = u32::from_le_bytes(data[offset + 14..offset + 18].try_into().unwrap());
        let page_sequence =
            u32::from_le_bytes(data[offset + 18..offset + 22].try_into().unwrap());

        // CRC at offset+22..+26 (verification intentionally skipped).

        let segment_count = data[offset + 26] as usize;

        if offset + 27 + segment_count > data.len() {
            return Err("OGG page segment table extends beyond data".to_string());
        }

        let segment_table: Vec<u8> = data[offset + 27..offset + 27 + segment_count].to_vec();

        let total_data_size: usize = segment_table.iter().map(|&s| s as usize).sum();
        let data_start = offset + 27 + segment_count;

        if data_start + total_data_size > data.len() {
            return Err("OGG page data extends beyond file".to_string());
        }

        let page_data = data[data_start..data_start + total_data_size].to_vec();

        pages.push(OggPage {
            header_type,
            granule_position,
            serial,
            page_sequence,
            segment_count: segment_count as u8,
            segment_table,
            data: page_data,
        });

        offset = data_start + total_data_size;
    }

    if pages.is_empty() {
        return Err("No OGG pages found".to_string());
    }

    Ok(pages)
}

/// Extract Vorbis packets from parsed OGG pages.
///
/// Packets can span multiple segments (segment length = 255 means continuation)
/// and multiple pages. Zero-length packets are dropped (unchanged behavior).
///
/// # Errors
/// Returns `Err` when a page's segment table claims more bytes than the page
/// actually carries. (Previously this case panicked on an out-of-bounds slice;
/// pages built by `parse_ogg_pages` are always consistent, but `OggPage` is a
/// public type and callers may construct malformed ones.)
pub fn extract_packets(pages: &[OggPage]) -> Result<Vec<Vec<u8>>, String> {
    let mut packets: Vec<Vec<u8>> = Vec::new();
    let mut current_packet: Vec<u8> = Vec::new();

    for page in pages {
        let mut data_offset = 0;

        for &seg_len in &page.segment_table {
            let end = data_offset + seg_len as usize;
            // Bounds-checked so a malformed OggPage cannot panic the decoder.
            let seg_data = page
                .data
                .get(data_offset..end)
                .ok_or_else(|| "OGG segment table inconsistent with page data".to_string())?;
            current_packet.extend_from_slice(seg_data);
            data_offset = end;

            // A segment length < 255 terminates the current packet; exactly
            // 255 means the packet continues in the next segment — or, when
            // this is the page's last segment, on the next page.
            if seg_len < 255 && !current_packet.is_empty() {
                packets.push(std::mem::take(&mut current_packet));
            }
        }
    }

    // If there's remaining data in current_packet (ended with 255-byte segments
    // and no terminating segment), flush it as a final packet.
    if !current_packet.is_empty() {
        packets.push(current_packet);
    }

    Ok(packets)
}

/// Convenience function: parse OGG container and extract all Vorbis packets.
pub fn parse_ogg(data: &[u8]) -> Result<Vec<Vec<u8>>, String> {
    let pages = parse_ogg_pages(data)?;
    extract_packets(&pages)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a minimal OGG page from raw packet data.
    ///
    /// Each packet is lacing-encoded into the segment table: one 255 entry
    /// per full 255-byte run, then a terminating entry < 255 (possibly 0).
    /// The CRC field is left zeroed; `parse_ogg_pages` skips CRC verification.
    fn build_ogg_page(
        header_type: u8,
        granule: u64,
        serial: u32,
        page_seq: u32,
        packets_data: &[&[u8]],
    ) -> Vec<u8> {
        // Build segment table and concatenated data
        let mut segment_table = Vec::new();
        let mut page_data = Vec::new();

        for (i, packet) in packets_data.iter().enumerate() {
            let len = packet.len();
            // Write full 255-byte segments
            let full_segments = len / 255;
            let remainder = len % 255;

            for _ in 0..full_segments {
                segment_table.push(255u8);
            }
            // Terminating segment (< 255), even if 0 to signal end of packet
            segment_table.push(remainder as u8);

            page_data.extend_from_slice(packet);
        }

        let segment_count = segment_table.len();
        let mut out = Vec::new();

        // Capture pattern
        out.extend_from_slice(b"OggS");
        // Version
        out.push(0);
        // Header type
        out.push(header_type);
        // Granule position
        out.extend_from_slice(&granule.to_le_bytes());
        // Serial
        out.extend_from_slice(&serial.to_le_bytes());
        // Page sequence
        out.extend_from_slice(&page_seq.to_le_bytes());
        // CRC (dummy zeros)
        out.extend_from_slice(&[0u8; 4]);
        // Segment count
        out.push(segment_count as u8);
        // Segment table
        out.extend_from_slice(&segment_table);
        // Data
        out.extend_from_slice(&page_data);

        out
    }

    // Header fields and payload of a single page survive parsing.
    #[test]
    fn parse_single_page() {
        let packet = b"hello vorbis";
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[packet.as_slice()]);
        let pages = parse_ogg_pages(&page_bytes).expect("parse failed");
        assert_eq!(pages.len(), 1);
        assert_eq!(pages[0].header_type, 0x02);
        assert_eq!(pages[0].serial, 1);
        assert_eq!(pages[0].page_sequence, 0);
        assert_eq!(pages[0].data, packet);
    }

    // Two back-to-back pages are split correctly.
    #[test]
    fn parse_multiple_pages() {
        let p1 = build_ogg_page(0x02, 0, 1, 0, &[b"first"]);
        let p2 = build_ogg_page(0x00, 100, 1, 1, &[b"second"]);
        let mut data = p1;
        data.extend_from_slice(&p2);

        let pages = parse_ogg_pages(&data).expect("parse failed");
        assert_eq!(pages.len(), 2);
        assert_eq!(pages[0].page_sequence, 0);
        assert_eq!(pages[1].page_sequence, 1);
        assert_eq!(pages[1].granule_position, 100);
    }

    #[test]
    fn extract_single_packet() {
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[b"packet_one"]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 1);
        assert_eq!(packets[0], b"packet_one");
    }

    // Multiple packets on one page are separated by the lacing terminators.
    #[test]
    fn extract_multiple_packets_single_page() {
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[b"pkt1", b"pkt2", b"pkt3"]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 3);
        assert_eq!(packets[0], b"pkt1");
        assert_eq!(packets[1], b"pkt2");
        assert_eq!(packets[2], b"pkt3");
    }

    // A >255-byte packet exercises the 255/terminator lacing path.
    #[test]
    fn extract_large_packet_spanning_segments() {
        // Create a packet larger than 255 bytes
        let large_packet: Vec<u8> = (0..600).map(|i| (i % 256) as u8).collect();
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[&large_packet]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 1);
        assert_eq!(packets[0], large_packet);
    }

    // Data not starting with "OggS" must be rejected.
    #[test]
    fn invalid_capture_pattern() {
        let data = b"NotOGGdata";
        let result = parse_ogg_pages(data);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("capture pattern"));
    }

    #[test]
    fn empty_data() {
        let result = parse_ogg_pages(&[]);
        assert!(result.is_err());
    }

    // All four header fields are decoded from the right offsets.
    #[test]
    fn page_header_fields() {
        let page_bytes = build_ogg_page(0x04, 12345, 42, 7, &[b"data"]);
        let pages = parse_ogg_pages(&page_bytes).expect("parse failed");
        assert_eq!(pages[0].header_type, 0x04); // EOS
        assert_eq!(pages[0].granule_position, 12345);
        assert_eq!(pages[0].serial, 42);
        assert_eq!(pages[0].page_sequence, 7);
    }
}
|
||||
1493
crates/voltex_audio/src/vorbis.rs
Normal file
1493
crates/voltex_audio/src/vorbis.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -62,7 +62,42 @@ fn find_chunk(data: &[u8], id: &[u8; 4], start: usize) -> Option<(usize, u32)> {
|
||||
// Public API
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse a PCM 16-bit WAV file from raw bytes into an [`AudioClip`].
|
||||
/// Read a 24-bit signed integer (little-endian) from 3 bytes and return as i32.
///
/// Returns an error when fewer than 3 bytes are available at `offset`.
fn read_i24_le(data: &[u8], offset: usize) -> Result<i32, String> {
    match data.get(offset..offset + 3) {
        Some(bytes) => {
            let unsigned =
                (bytes[0] as u32) | ((bytes[1] as u32) << 8) | ((bytes[2] as u32) << 16);
            // Sign-extend bit 23 into the top byte to widen to 32 bits.
            let extended = if unsigned & 0x800000 != 0 {
                unsigned | 0xFF000000
            } else {
                unsigned
            };
            Ok(extended as i32)
        }
        None => Err(format!(
            "read_i24_le: offset {} out of bounds (len={})",
            offset,
            data.len()
        )),
    }
}
|
||||
|
||||
/// Read a 32-bit float (little-endian).
///
/// Returns an error when fewer than 4 bytes are available at `offset`.
fn read_f32_le(data: &[u8], offset: usize) -> Result<f32, String> {
    match data.get(offset..offset + 4) {
        Some(b) => Ok(f32::from_le_bytes([b[0], b[1], b[2], b[3]])),
        None => Err(format!(
            "read_f32_le: offset {} out of bounds (len={})",
            offset,
            data.len()
        )),
    }
}
|
||||
|
||||
/// Parse a WAV file from raw bytes into an [`AudioClip`].
|
||||
///
|
||||
/// Supported formats:
|
||||
/// - PCM 16-bit (format_tag=1, bits_per_sample=16)
|
||||
/// - PCM 24-bit (format_tag=1, bits_per_sample=24)
|
||||
/// - IEEE float 32-bit (format_tag=3, bits_per_sample=32)
|
||||
pub fn parse_wav(data: &[u8]) -> Result<AudioClip, String> {
|
||||
// Minimum viable WAV: RIFF(4) + size(4) + WAVE(4) = 12 bytes
|
||||
if data.len() < 12 {
|
||||
@@ -86,10 +121,6 @@ pub fn parse_wav(data: &[u8]) -> Result<AudioClip, String> {
|
||||
}
|
||||
|
||||
let format_tag = read_u16_le(data, fmt_offset)?;
|
||||
if format_tag != 1 {
|
||||
return Err(format!("Unsupported WAV format tag: {} (only PCM=1 is supported)", format_tag));
|
||||
}
|
||||
|
||||
let channels = read_u16_le(data, fmt_offset + 2)?;
|
||||
if channels != 1 && channels != 2 {
|
||||
return Err(format!("Unsupported channel count: {}", channels));
|
||||
@@ -99,9 +130,16 @@ pub fn parse_wav(data: &[u8]) -> Result<AudioClip, String> {
|
||||
// byte_rate = fmt_offset + 8 (skip)
|
||||
// block_align = fmt_offset + 12 (skip)
|
||||
let bits_per_sample = read_u16_le(data, fmt_offset + 14)?;
|
||||
if bits_per_sample != 16 {
|
||||
return Err(format!("Unsupported bits per sample: {} (only 16-bit is supported)", bits_per_sample));
|
||||
}
|
||||
|
||||
// Validate format_tag + bits_per_sample combination
|
||||
let bytes_per_sample = match (format_tag, bits_per_sample) {
|
||||
(1, 16) => 2, // PCM 16-bit
|
||||
(1, 24) => 3, // PCM 24-bit
|
||||
(3, 32) => 4, // IEEE float 32-bit
|
||||
(1, bps) => return Err(format!("Unsupported PCM bits per sample: {}", bps)),
|
||||
(3, bps) => return Err(format!("Unsupported float bits per sample: {} (only 32-bit supported)", bps)),
|
||||
(tag, _) => return Err(format!("Unsupported WAV format tag: {} (only PCM=1 and IEEE_FLOAT=3 are supported)", tag)),
|
||||
};
|
||||
|
||||
// --- data chunk ---
|
||||
let (data_offset, data_size) =
|
||||
@@ -112,14 +150,30 @@ pub fn parse_wav(data: &[u8]) -> Result<AudioClip, String> {
|
||||
return Err("data chunk extends beyond end of file".to_string());
|
||||
}
|
||||
|
||||
// Each sample is 2 bytes (16-bit PCM).
|
||||
let sample_count = data_size as usize / 2;
|
||||
let sample_count = data_size as usize / bytes_per_sample;
|
||||
let mut samples = Vec::with_capacity(sample_count);
|
||||
|
||||
for i in 0..sample_count {
|
||||
let raw = read_i16_le(data, data_offset + i * 2)?;
|
||||
// Convert i16 [-32768, 32767] to f32 [-1.0, ~1.0]
|
||||
samples.push(raw as f32 / 32768.0);
|
||||
match (format_tag, bits_per_sample) {
|
||||
(1, 16) => {
|
||||
for i in 0..sample_count {
|
||||
let raw = read_i16_le(data, data_offset + i * 2)?;
|
||||
samples.push(raw as f32 / 32768.0);
|
||||
}
|
||||
}
|
||||
(1, 24) => {
|
||||
for i in 0..sample_count {
|
||||
let raw = read_i24_le(data, data_offset + i * 3)?;
|
||||
// 24-bit range: [-8388608, 8388607]
|
||||
samples.push(raw as f32 / 8388608.0);
|
||||
}
|
||||
}
|
||||
(3, 32) => {
|
||||
for i in 0..sample_count {
|
||||
let raw = read_f32_le(data, data_offset + i * 4)?;
|
||||
samples.push(raw);
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
Ok(AudioClip::new(samples, sample_rate, channels))
|
||||
@@ -165,6 +219,87 @@ pub fn generate_wav_bytes(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
|
||||
out
|
||||
}
|
||||
|
||||
/// Generate a minimal PCM 24-bit mono WAV file from f32 samples.
/// Used for round-trip testing.
///
/// Samples are clamped to [-1.0, 1.0] and quantized to the 24-bit range
/// by scaling with 8388607, then written as 3 little-endian bytes each.
pub fn generate_wav_bytes_24bit(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
    const CHANNELS: u16 = 1;
    const BITS: u16 = 24;
    let byte_rate = sample_rate * CHANNELS as u32 * BITS as u32 / 8;
    let block_align: u16 = CHANNELS * BITS / 8;
    let data_size = (samples_f32.len() * 3) as u32;
    // RIFF chunk size: "WAVE"(4) + fmt header(8) + fmt body(16) + data header(8) + payload
    let riff_size = 4 + 8 + 16 + 8 + data_size;

    let mut out: Vec<u8> = Vec::with_capacity(12 + 8 + 16 + 8 + data_size as usize);

    // RIFF header
    out.extend_from_slice(b"RIFF");
    out.extend_from_slice(&riff_size.to_le_bytes());
    out.extend_from_slice(b"WAVE");

    // fmt chunk (PCM, mono, 24-bit)
    out.extend_from_slice(b"fmt ");
    out.extend_from_slice(&16u32.to_le_bytes());
    out.extend_from_slice(&1u16.to_le_bytes()); // format tag: PCM
    out.extend_from_slice(&CHANNELS.to_le_bytes());
    out.extend_from_slice(&sample_rate.to_le_bytes());
    out.extend_from_slice(&byte_rate.to_le_bytes());
    out.extend_from_slice(&block_align.to_le_bytes());
    out.extend_from_slice(&BITS.to_le_bytes());

    // data chunk: 3 little-endian bytes per sample
    out.extend_from_slice(b"data");
    out.extend_from_slice(&data_size.to_le_bytes());
    for &sample in samples_f32 {
        let quantized = (sample.clamp(-1.0, 1.0) * 8388607.0) as i32;
        // Low 3 bytes of the i32 are exactly the 24-bit LE encoding.
        out.extend_from_slice(&quantized.to_le_bytes()[..3]);
    }

    out
}
|
||||
|
||||
/// Generate a minimal IEEE float 32-bit mono WAV file from f32 samples.
/// Used for round-trip testing.
///
/// Samples are written verbatim as little-endian f32s (no clamping or
/// quantization), so a parse round-trip is bit-exact.
pub fn generate_wav_bytes_f32(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
    const CHANNELS: u16 = 1;
    const BITS: u16 = 32;
    let byte_rate = sample_rate * CHANNELS as u32 * BITS as u32 / 8;
    let block_align: u16 = CHANNELS * BITS / 8;
    let data_size = (samples_f32.len() * 4) as u32;
    // RIFF chunk size: "WAVE"(4) + fmt header(8) + fmt body(16) + data header(8) + payload
    let riff_size = 4 + 8 + 16 + 8 + data_size;

    let mut out: Vec<u8> = Vec::with_capacity(12 + 8 + 16 + 8 + data_size as usize);

    // RIFF header
    out.extend_from_slice(b"RIFF");
    out.extend_from_slice(&riff_size.to_le_bytes());
    out.extend_from_slice(b"WAVE");

    // fmt chunk (IEEE float, mono, 32-bit)
    out.extend_from_slice(b"fmt ");
    out.extend_from_slice(&16u32.to_le_bytes());
    out.extend_from_slice(&3u16.to_le_bytes()); // format tag: IEEE_FLOAT
    out.extend_from_slice(&CHANNELS.to_le_bytes());
    out.extend_from_slice(&sample_rate.to_le_bytes());
    out.extend_from_slice(&byte_rate.to_le_bytes());
    out.extend_from_slice(&block_align.to_le_bytes());
    out.extend_from_slice(&BITS.to_le_bytes());

    // data chunk: raw little-endian f32 payload
    out.extend_from_slice(b"data");
    out.extend_from_slice(&data_size.to_le_bytes());
    for &sample in samples_f32 {
        out.extend_from_slice(&sample.to_le_bytes());
    }

    out
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -237,4 +372,107 @@ mod tests {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
    // -----------------------------------------------------------------------
    // 24-bit PCM tests
    // -----------------------------------------------------------------------

    // A generated 24-bit sine file parses back with matching rate and frames.
    #[test]
    fn parse_24bit_wav() {
        let sample_rate = 44100u32;
        let num_samples = 4410usize;
        let samples: Vec<f32> = (0..num_samples)
            .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
            .collect();

        let wav_bytes = generate_wav_bytes_24bit(&samples, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav 24-bit failed");

        assert_eq!(clip.sample_rate, sample_rate);
        assert_eq!(clip.channels, 1);
        assert_eq!(clip.frame_count(), num_samples);
    }

    // Encode then decode; values must match within 24-bit quantization error.
    #[test]
    fn roundtrip_24bit() {
        let original: Vec<f32> = vec![0.0, 0.25, 0.5, -0.25, -0.5, 1.0, -1.0];
        let wav_bytes = generate_wav_bytes_24bit(&original, 44100);
        let clip = parse_wav(&wav_bytes).expect("roundtrip 24-bit parse failed");

        assert_eq!(clip.samples.len(), original.len());
        for (orig, decoded) in original.iter().zip(clip.samples.iter()) {
            // 24-bit quantization error should be < 0.0001
            assert!(
                (orig - decoded).abs() < 0.0001,
                "24-bit: orig={} decoded={}",
                orig,
                decoded
            );
        }
    }

    #[test]
    fn accuracy_24bit() {
        // 24-bit should be more accurate than 16-bit
        let samples = vec![0.5f32];
        let wav_bytes = generate_wav_bytes_24bit(&samples, 44100);
        let clip = parse_wav(&wav_bytes).expect("parse failed");
        // 0.5 * 8388607 = 4194303 -> 4194303 / 8388608 ≈ 0.49999988
        assert!((clip.samples[0] - 0.5).abs() < 0.0001, "24-bit got {}", clip.samples[0]);
    }
|
||||
|
||||
    // -----------------------------------------------------------------------
    // 32-bit float tests
    // -----------------------------------------------------------------------

    // A generated float32 sine file parses back with matching rate and frames.
    #[test]
    fn parse_32bit_float_wav() {
        let sample_rate = 44100u32;
        let num_samples = 4410usize;
        let samples: Vec<f32> = (0..num_samples)
            .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
            .collect();

        let wav_bytes = generate_wav_bytes_f32(&samples, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav float32 failed");

        assert_eq!(clip.sample_rate, sample_rate);
        assert_eq!(clip.channels, 1);
        assert_eq!(clip.frame_count(), num_samples);
    }

    // Float32 storage is lossless, so the round trip must be bit-exact.
    #[test]
    fn roundtrip_32bit_float() {
        let original: Vec<f32> = vec![0.0, 0.25, 0.5, -0.25, -0.5, 1.0, -1.0];
        let wav_bytes = generate_wav_bytes_f32(&original, 44100);
        let clip = parse_wav(&wav_bytes).expect("roundtrip float32 parse failed");

        assert_eq!(clip.samples.len(), original.len());
        for (orig, decoded) in original.iter().zip(clip.samples.iter()) {
            // 32-bit float should be exact
            assert_eq!(*orig, *decoded, "float32: orig={} decoded={}", orig, decoded);
        }
    }

    #[test]
    fn accuracy_32bit_float() {
        // 32-bit float should preserve exact values
        let samples = vec![0.123456789f32, -0.987654321f32];
        let wav_bytes = generate_wav_bytes_f32(&samples, 44100);
        let clip = parse_wav(&wav_bytes).expect("parse failed");
        assert_eq!(clip.samples[0], 0.123456789f32);
        assert_eq!(clip.samples[1], -0.987654321f32);
    }

    // Format tags other than PCM(1) / IEEE_FLOAT(3) must be rejected.
    #[test]
    fn reject_unsupported_format_tag() {
        // Create a WAV with format_tag=2 (ADPCM), which we don't support
        let mut wav = generate_wav_bytes(&[0.0], 44100);
        // format_tag is at byte 20-21 (RIFF(4)+size(4)+WAVE(4)+fmt(4)+chunk_size(4))
        wav[20] = 2;
        wav[21] = 0;
        let result = parse_wav(&wav);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Unsupported WAV format tag"));
    }
|
||||
}
|
||||
|
||||
263
crates/voltex_ecs/src/binary_scene.rs
Normal file
263
crates/voltex_ecs/src/binary_scene.rs
Normal file
@@ -0,0 +1,263 @@
|
||||
/// Binary scene format (.vscn binary).
|
||||
///
|
||||
/// Format:
|
||||
/// Header: "VSCN" (4 bytes) + version u32 LE + entity_count u32 LE
|
||||
/// Per entity:
|
||||
/// parent_index i32 LE (-1 = no parent)
|
||||
/// component_count u32 LE
|
||||
/// Per component:
|
||||
/// name_len u16 LE + name bytes
|
||||
/// data_len u32 LE + data bytes
|
||||
|
||||
use std::collections::HashMap;
|
||||
use crate::entity::Entity;
|
||||
use crate::world::World;
|
||||
use crate::transform::Transform;
|
||||
use crate::hierarchy::{add_child, Parent};
|
||||
use crate::component_registry::ComponentRegistry;
|
||||
|
||||
const MAGIC: &[u8; 4] = b"VSCN";
|
||||
const VERSION: u32 = 1;
|
||||
|
||||
/// Serialize all entities with a Transform to the binary scene format.
///
/// Only entities carrying a `Transform` are emitted, in query order. Parent
/// links are stored as indices into that emitted list; a parent that is not
/// itself serialized (or no parent at all) is written as -1.
pub fn serialize_scene_binary(world: &World, registry: &ComponentRegistry) -> Vec<u8> {
    // Snapshot the serializable entity set so indices stay stable below.
    let entities_with_transform: Vec<(Entity, Transform)> = world
        .query::<Transform>()
        .map(|(e, t)| (e, *t))
        .collect();

    // Entity -> index into the snapshot, used to encode parent references.
    let entity_to_index: HashMap<Entity, usize> = entities_with_transform
        .iter()
        .enumerate()
        .map(|(i, (e, _))| (*e, i))
        .collect();

    let entity_count = entities_with_transform.len() as u32;
    let mut buf = Vec::new();

    // Header
    buf.extend_from_slice(MAGIC);
    buf.extend_from_slice(&VERSION.to_le_bytes());
    buf.extend_from_slice(&entity_count.to_le_bytes());

    // Entities
    for (entity, _) in &entities_with_transform {
        // Parent index (-1 when there is no parent or it was not serialized)
        let parent_idx: i32 = if let Some(parent_comp) = world.get::<Parent>(*entity) {
            entity_to_index.get(&parent_comp.0)
                .map(|&i| i as i32)
                .unwrap_or(-1)
        } else {
            -1
        };
        buf.extend_from_slice(&parent_idx.to_le_bytes());

        // Collect serializable components
        let mut comp_data: Vec<(&str, Vec<u8>)> = Vec::new();
        for entry in registry.entries() {
            if let Some(data) = (entry.serialize)(world, *entity) {
                comp_data.push((&entry.name, data));
            }
        }

        let comp_count = comp_data.len() as u32;
        buf.extend_from_slice(&comp_count.to_le_bytes());

        // Per component: name_len(u16) + name + data_len(u32) + data
        for (name, data) in &comp_data {
            let name_bytes = name.as_bytes();
            buf.extend_from_slice(&(name_bytes.len() as u16).to_le_bytes());
            buf.extend_from_slice(name_bytes);
            buf.extend_from_slice(&(data.len() as u32).to_le_bytes());
            buf.extend_from_slice(data);
        }
    }

    buf
}
|
||||
|
||||
/// Deserialize entities from binary scene data.
///
/// Returns the newly spawned entities in file order. Component payloads whose
/// name is not found in `registry` are skipped silently. Parent indices are
/// resolved only after every entity has been spawned.
pub fn deserialize_scene_binary(
    world: &mut World,
    data: &[u8],
    registry: &ComponentRegistry,
) -> Result<Vec<Entity>, String> {
    // Minimum header: MAGIC(4) + version(4) + entity_count(4).
    if data.len() < 12 {
        return Err("Binary scene data too short".into());
    }

    // Verify magic
    if &data[0..4] != MAGIC {
        return Err("Invalid magic bytes — expected VSCN".into());
    }

    let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
    if version != 1 {
        return Err(format!("Unsupported binary scene version: {}", version));
    }

    let entity_count = u32::from_le_bytes([data[8], data[9], data[10], data[11]]) as usize;
    // `pos` is the running byte cursor; every read below bounds-checks first.
    let mut pos = 12;

    let mut created: Vec<Entity> = Vec::with_capacity(entity_count);
    // Parent links are recorded first and applied after the loop, since a
    // child may appear in the file before its parent.
    let mut parent_indices: Vec<i32> = Vec::with_capacity(entity_count);

    for _ in 0..entity_count {
        // Parent index
        if pos + 4 > data.len() {
            return Err("Unexpected end of data reading parent index".into());
        }
        let parent_idx = i32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]);
        pos += 4;

        // Component count
        if pos + 4 > data.len() {
            return Err("Unexpected end of data reading component count".into());
        }
        let comp_count = u32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]) as usize;
        pos += 4;

        // NOTE(review): entities spawned before a later parse error remain in
        // the world — confirm callers treat a failed load as fatal/discard.
        let entity = world.spawn();

        for _ in 0..comp_count {
            // Name length
            if pos + 2 > data.len() {
                return Err("Unexpected end of data reading name length".into());
            }
            let name_len = u16::from_le_bytes([data[pos], data[pos + 1]]) as usize;
            pos += 2;

            // Name
            if pos + name_len > data.len() {
                return Err("Unexpected end of data reading component name".into());
            }
            let name = std::str::from_utf8(&data[pos..pos + name_len])
                .map_err(|_| "Invalid UTF-8 in component name".to_string())?;
            pos += name_len;

            // Data length
            if pos + 4 > data.len() {
                return Err("Unexpected end of data reading data length".into());
            }
            let data_len = u32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]) as usize;
            pos += 4;

            // Data
            if pos + data_len > data.len() {
                return Err("Unexpected end of data reading component data".into());
            }
            let comp_data = &data[pos..pos + data_len];
            pos += data_len;

            // Deserialize via registry (unknown component names are ignored)
            if let Some(entry) = registry.find(name) {
                (entry.deserialize)(world, entity, comp_data)?;
            }
        }

        created.push(entity);
        parent_indices.push(parent_idx);
    }

    // Apply parent relationships (indices refer to `created` order;
    // out-of-range indices are silently ignored)
    for (child_idx, &parent_idx) in parent_indices.iter().enumerate() {
        if parent_idx >= 0 {
            let pi = parent_idx as usize;
            if pi < created.len() {
                let child_entity = created[child_idx];
                let parent_entity = created[pi];
                add_child(world, parent_entity, child_entity);
            }
        }
    }

    Ok(created)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::scene::Tag;
    use voltex_math::Vec3;

    // Transform + Tag survive a serialize/deserialize round trip.
    #[test]
    fn test_binary_roundtrip() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(5.0, 0.0, -1.0)));
        world.add(e, Tag("enemy".into()));

        let data = serialize_scene_binary(&world, &registry);
        assert_eq!(&data[0..4], b"VSCN");

        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 1);
        let t = world2.get::<Transform>(entities[0]).unwrap();
        assert!((t.position.x - 5.0).abs() < 1e-6);
        assert!((t.position.z - (-1.0)).abs() < 1e-6);
        let tag = world2.get::<Tag>(entities[0]).unwrap();
        assert_eq!(tag.0, "enemy");
    }

    // Parent links are re-established after loading.
    #[test]
    fn test_binary_with_hierarchy() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let a = world.spawn();
        let b = world.spawn();
        world.add(a, Transform::new());
        world.add(b, Transform::new());
        add_child(&mut world, a, b);

        let data = serialize_scene_binary(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 2);
        assert!(world2.get::<Parent>(entities[1]).is_some());
        let p = world2.get::<Parent>(entities[1]).unwrap();
        assert_eq!(p.0, entities[0]);
    }

    // Zeroed bytes fail the magic check.
    #[test]
    fn test_binary_invalid_magic() {
        let data = vec![0u8; 20];
        let mut world = World::new();
        let registry = ComponentRegistry::new();
        assert!(deserialize_scene_binary(&mut world, &data, &registry).is_err());
    }

    // Anything shorter than the 12-byte header is rejected.
    #[test]
    fn test_binary_too_short() {
        let data = vec![0u8; 5];
        let mut world = World::new();
        let registry = ComponentRegistry::new();
        assert!(deserialize_scene_binary(&mut world, &data, &registry).is_err());
    }

    // Order and per-entity component payloads are preserved for several entities.
    #[test]
    fn test_binary_multiple_entities() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        for i in 0..3 {
            let e = world.spawn();
            world.add(e, Transform::from_position(Vec3::new(i as f32, 0.0, 0.0)));
            world.add(e, Tag(format!("e{}", i)));
        }

        let data = serialize_scene_binary(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 3);

        for (i, &e) in entities.iter().enumerate() {
            let t = world2.get::<Transform>(e).unwrap();
            assert!((t.position.x - i as f32).abs() < 1e-6);
            let tag = world2.get::<Tag>(e).unwrap();
            assert_eq!(tag.0, format!("e{}", i));
        }
    }
}
|
||||
198
crates/voltex_ecs/src/component_registry.rs
Normal file
198
crates/voltex_ecs/src/component_registry.rs
Normal file
@@ -0,0 +1,198 @@
|
||||
/// Registration-based component serialization for scene formats.
|
||||
/// Each registered component type has a name, a serialize function,
|
||||
/// and a deserialize function.
|
||||
|
||||
use crate::entity::Entity;
|
||||
use crate::world::World;
|
||||
|
||||
pub type SerializeFn = fn(&World, Entity) -> Option<Vec<u8>>;
|
||||
pub type DeserializeFn = fn(&mut World, Entity, &[u8]) -> Result<(), String>;
|
||||
|
||||
/// A registered component type: its scene-file name plus codec functions.
pub struct ComponentEntry {
    pub name: String,
    pub serialize: SerializeFn,
    pub deserialize: DeserializeFn,
}

/// Ordered collection of serializable component types, looked up by name.
pub struct ComponentRegistry {
    entries: Vec<ComponentEntry>,
}
|
||||
|
||||
impl ComponentRegistry {
|
||||
pub fn new() -> Self {
|
||||
Self { entries: Vec::new() }
|
||||
}
|
||||
|
||||
pub fn register(&mut self, name: &str, ser: SerializeFn, deser: DeserializeFn) {
|
||||
self.entries.push(ComponentEntry {
|
||||
name: name.to_string(),
|
||||
serialize: ser,
|
||||
deserialize: deser,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn find(&self, name: &str) -> Option<&ComponentEntry> {
|
||||
self.entries.iter().find(|e| e.name == name)
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> &[ComponentEntry] {
|
||||
&self.entries
|
||||
}
|
||||
|
||||
/// Register the default built-in component types: transform and tag.
|
||||
pub fn register_defaults(&mut self) {
|
||||
self.register("transform", serialize_transform, deserialize_transform);
|
||||
self.register("tag", serialize_tag, deserialize_tag);
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ComponentRegistry {
    /// Equivalent to [`ComponentRegistry::new`]: an empty registry.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
// ── Transform: 9 f32s in little-endian (pos.xyz, rot.xyz, scale.xyz) ─

/// Serialize an entity's `Transform` as 36 bytes: position, rotation and
/// scale, each written as three little-endian f32s in that order.
/// Returns None when the entity has no Transform.
fn serialize_transform(world: &World, entity: Entity) -> Option<Vec<u8>> {
    let t = world.get::<crate::Transform>(entity)?;
    let mut data = Vec::with_capacity(36);
    for &v in &[
        t.position.x, t.position.y, t.position.z,
        t.rotation.x, t.rotation.y, t.rotation.z,
        t.scale.x, t.scale.y, t.scale.z,
    ] {
        data.extend_from_slice(&v.to_le_bytes());
    }
    Some(data)
}
|
||||
|
||||
/// Decode a `Transform` from the 36-byte layout written by
/// `serialize_transform` and attach it to `entity` (trailing bytes ignored).
fn deserialize_transform(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
    if data.len() < 36 {
        return Err("Transform data too short".into());
    }
    // Read one little-endian f32 at byte offset `off`.
    let f = |off: usize| f32::from_le_bytes([data[off], data[off + 1], data[off + 2], data[off + 3]]);
    let t = crate::Transform {
        position: voltex_math::Vec3::new(f(0), f(4), f(8)),
        rotation: voltex_math::Vec3::new(f(12), f(16), f(20)),
        scale: voltex_math::Vec3::new(f(24), f(28), f(32)),
    };
    world.add(entity, t);
    Ok(())
}
|
||||
|
||||
// ── Tag: UTF-8 string bytes ─────────────────────────────────────────

/// Serialize an entity's `Tag` as its raw UTF-8 bytes; None when absent.
fn serialize_tag(world: &World, entity: Entity) -> Option<Vec<u8>> {
    let tag = world.get::<crate::scene::Tag>(entity)?;
    Some(tag.0.as_bytes().to_vec())
}
|
||||
|
||||
/// Decode a `Tag` from UTF-8 bytes and attach it to `entity`.
fn deserialize_tag(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
    let s = std::str::from_utf8(data).map_err(|_| "Invalid UTF-8 in tag".to_string())?;
    world.add(entity, crate::scene::Tag(s.to_string()));
    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{World, Transform};
    use voltex_math::Vec3;

    // register_defaults exposes "transform" and "tag" by name; unknown names miss.
    #[test]
    fn test_register_and_find() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        assert!(registry.find("transform").is_some());
        assert!(registry.find("tag").is_some());
        assert!(registry.find("nonexistent").is_none());
    }

    // Exactly the two built-in components are registered by default.
    #[test]
    fn test_entries_count() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        assert_eq!(registry.entries().len(), 2);
    }

    // Transform serializes to a fixed 36-byte payload (9 little-endian f32s).
    #[test]
    fn test_serialize_transform() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));

        let entry = registry.find("transform").unwrap();
        let data = (entry.serialize)(&world, e);
        assert!(data.is_some());
        assert_eq!(data.unwrap().len(), 36); // 9 f32s * 4 bytes
    }

    // Serializing an entity that lacks the component yields None, not an error.
    #[test]
    fn test_serialize_missing_component() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        // no Transform added

        let entry = registry.find("transform").unwrap();
        assert!((entry.serialize)(&world, e).is_none());
    }

    // serialize -> deserialize into a fresh world preserves all nine floats.
    #[test]
    fn test_roundtrip_transform() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform {
            position: Vec3::new(1.0, 2.0, 3.0),
            rotation: Vec3::new(0.1, 0.2, 0.3),
            scale: Vec3::new(4.0, 5.0, 6.0),
        });

        let entry = registry.find("transform").unwrap();
        let data = (entry.serialize)(&world, e).unwrap();

        let mut world2 = World::new();
        let e2 = world2.spawn();
        (entry.deserialize)(&mut world2, e2, &data).unwrap();

        let t = world2.get::<Transform>(e2).unwrap();
        assert!((t.position.x - 1.0).abs() < 1e-6);
        assert!((t.position.y - 2.0).abs() < 1e-6);
        assert!((t.position.z - 3.0).abs() < 1e-6);
        assert!((t.rotation.x - 0.1).abs() < 1e-6);
        assert!((t.scale.x - 4.0).abs() < 1e-6);
    }

    // Tag round-trips through its raw UTF-8 byte encoding.
    #[test]
    fn test_roundtrip_tag() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, crate::scene::Tag("hello world".to_string()));

        let entry = registry.find("tag").unwrap();
        let data = (entry.serialize)(&world, e).unwrap();

        let mut world2 = World::new();
        let e2 = world2.spawn();
        (entry.deserialize)(&mut world2, e2, &data).unwrap();

        let tag = world2.get::<crate::scene::Tag>(e2).unwrap();
        assert_eq!(tag.0, "hello world");
    }

    // Truncated payloads (< 36 bytes) are rejected with Err, not a panic.
    #[test]
    fn test_deserialize_transform_too_short() {
        let mut world = World::new();
        let e = world.spawn();
        let result = deserialize_transform(&mut world, e, &[0u8; 10]);
        assert!(result.is_err());
    }
}
|
||||
476
crates/voltex_ecs/src/json.rs
Normal file
476
crates/voltex_ecs/src/json.rs
Normal file
@@ -0,0 +1,476 @@
|
||||
/// Mini JSON writer and parser for scene serialization.
|
||||
/// No external dependencies — self-contained within voltex_ecs.
|
||||
|
||||
/// A parsed JSON value tree. Objects are stored as an ordered Vec of pairs
/// (insertion order preserved; duplicate keys are kept as parsed).
#[derive(Debug, Clone, PartialEq)]
pub enum JsonVal {
    /// The JSON `null` literal.
    Null,
    /// `true` or `false`.
    Bool(bool),
    /// All JSON numbers are stored as f64.
    Number(f64),
    /// A string with escapes already resolved.
    Str(String),
    /// An ordered array of values.
    Array(Vec<JsonVal>),
    /// An object as ordered (key, value) pairs.
    Object(Vec<(String, JsonVal)>),
}
|
||||
|
||||
// ── Writer helpers ──────────────────────────────────────────────────
|
||||
|
||||
/// Emit the JSON `null` literal.
pub fn json_write_null() -> String {
    String::from("null")
}
|
||||
|
||||
/// Format an f32 for JSON output.
pub fn json_write_f32(v: f32) -> String {
    // Values with no fractional part (within i64-exact range) print without
    // a decimal point, e.g. 1.0 -> "1".
    let is_integral = v.fract() == 0.0 && v.abs() < 1e15;
    if is_integral {
        (v as i64).to_string()
    } else {
        v.to_string()
    }
}
|
||||
|
||||
/// Quote and escape a string for JSON output.
/// Escapes `"`, `\`, and the newline/carriage-return/tab controls; other
/// characters pass through unchanged.
pub fn json_write_string(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for ch in s.chars() {
        let escape: Option<&str> = match ch {
            '"' => Some("\\\""),
            '\\' => Some("\\\\"),
            '\n' => Some("\\n"),
            '\r' => Some("\\r"),
            '\t' => Some("\\t"),
            _ => None,
        };
        match escape {
            Some(e) => out.push_str(e),
            None => out.push(ch),
        }
    }
    out.push('"');
    out
}
|
||||
|
||||
/// Write a JSON array from pre-formatted element strings.
pub fn json_write_array(elements: &[&str]) -> String {
    // Elements are already valid JSON fragments; just comma-join inside brackets.
    format!("[{}]", elements.join(","))
}
|
||||
|
||||
/// Write a JSON object from (key, pre-formatted-value) pairs.
|
||||
pub fn json_write_object(pairs: &[(&str, &str)]) -> String {
|
||||
let mut out = String::from("{");
|
||||
for (i, (key, val)) in pairs.iter().enumerate() {
|
||||
if i > 0 {
|
||||
out.push(',');
|
||||
}
|
||||
out.push_str(&json_write_string(key));
|
||||
out.push(':');
|
||||
out.push_str(val);
|
||||
}
|
||||
out.push('}');
|
||||
out
|
||||
}
|
||||
|
||||
// ── Accessors on JsonVal ────────────────────────────────────────────
|
||||
|
||||
impl JsonVal {
|
||||
pub fn get(&self, key: &str) -> Option<&JsonVal> {
|
||||
match self {
|
||||
JsonVal::Object(pairs) => pairs.iter().find(|(k, _)| k == key).map(|(_, v)| v),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_f64(&self) -> Option<f64> {
|
||||
match self {
|
||||
JsonVal::Number(n) => Some(*n),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> Option<&str> {
|
||||
match self {
|
||||
JsonVal::Str(s) => Some(s.as_str()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_array(&self) -> Option<&Vec<JsonVal>> {
|
||||
match self {
|
||||
JsonVal::Array(a) => Some(a),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_object(&self) -> Option<&Vec<(String, JsonVal)>> {
|
||||
match self {
|
||||
JsonVal::Object(o) => Some(o),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_bool(&self) -> Option<bool> {
|
||||
match self {
|
||||
JsonVal::Bool(b) => Some(*b),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Parser (recursive descent) ──────────────────────────────────────
|
||||
|
||||
pub fn json_parse(input: &str) -> Result<JsonVal, String> {
|
||||
let bytes = input.as_bytes();
|
||||
let (val, pos) = parse_value(bytes, skip_ws(bytes, 0))?;
|
||||
let pos = skip_ws(bytes, pos);
|
||||
if pos != bytes.len() {
|
||||
return Err(format!("Unexpected trailing content at position {}", pos));
|
||||
}
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
/// Return the index of the first non-whitespace byte at or after `pos`
/// (space, tab, newline, carriage return are skipped).
fn skip_ws(b: &[u8], pos: usize) -> usize {
    let run = b.get(pos..).map_or(0, |tail| {
        tail.iter()
            .take_while(|&&c| matches!(c, b' ' | b'\t' | b'\n' | b'\r'))
            .count()
    });
    pos + run
}
|
||||
|
||||
fn parse_value(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
if pos >= b.len() {
|
||||
return Err("Unexpected end of input".into());
|
||||
}
|
||||
match b[pos] {
|
||||
b'"' => parse_string(b, pos),
|
||||
b'{' => parse_object(b, pos),
|
||||
b'[' => parse_array(b, pos),
|
||||
b't' | b'f' => parse_bool(b, pos),
|
||||
b'n' => parse_null(b, pos),
|
||||
_ => parse_number(b, pos),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_string(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
let (s, end) = read_string(b, pos)?;
|
||||
Ok((JsonVal::Str(s), end))
|
||||
}
|
||||
|
||||
/// Read a quoted JSON string starting at `pos` (which must be `"`), resolving
/// the escapes `\" \\ \/ \n \r \t`; an unknown escape is kept verbatim
/// (backslash plus the following byte). Returns the decoded string and the
/// index just past the closing quote.
///
/// Bug fix: the previous version pushed each raw byte with `b[i] as char`,
/// which reinterprets bytes >= 0x80 as Latin-1 and corrupts multi-byte UTF-8
/// input (e.g. "é" became two mojibake characters). We now accumulate bytes
/// and decode the buffer as UTF-8 once at the end.
fn read_string(b: &[u8], pos: usize) -> Result<(String, usize), String> {
    if pos >= b.len() || b[pos] != b'"' {
        return Err(format!("Expected '\"' at position {}", pos));
    }
    let mut i = pos + 1;
    let mut raw: Vec<u8> = Vec::new();
    while i < b.len() {
        match b[i] {
            b'"' => {
                // Input originates from a &str, and we only copy its bytes or
                // substitute ASCII escapes, so this conversion should not fail.
                return String::from_utf8(raw)
                    .map(|s| (s, i + 1))
                    .map_err(|_| "Invalid UTF-8 in string".to_string());
            }
            b'\\' => {
                i += 1;
                if i >= b.len() {
                    return Err("Unexpected end in string escape".into());
                }
                match b[i] {
                    b'"' => raw.push(b'"'),
                    b'\\' => raw.push(b'\\'),
                    b'/' => raw.push(b'/'),
                    b'n' => raw.push(b'\n'),
                    b'r' => raw.push(b'\r'),
                    b't' => raw.push(b'\t'),
                    other => {
                        // Unknown escape: preserve it verbatim.
                        raw.push(b'\\');
                        raw.push(other);
                    }
                }
                i += 1;
            }
            byte => {
                raw.push(byte);
                i += 1;
            }
        }
    }
    Err("Unterminated string".into())
}
|
||||
|
||||
fn parse_number(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
let mut i = pos;
|
||||
// optional minus
|
||||
if i < b.len() && b[i] == b'-' {
|
||||
i += 1;
|
||||
}
|
||||
// digits
|
||||
while i < b.len() && b[i].is_ascii_digit() {
|
||||
i += 1;
|
||||
}
|
||||
// fractional
|
||||
if i < b.len() && b[i] == b'.' {
|
||||
i += 1;
|
||||
while i < b.len() && b[i].is_ascii_digit() {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
// exponent
|
||||
if i < b.len() && (b[i] == b'e' || b[i] == b'E') {
|
||||
i += 1;
|
||||
if i < b.len() && (b[i] == b'+' || b[i] == b'-') {
|
||||
i += 1;
|
||||
}
|
||||
while i < b.len() && b[i].is_ascii_digit() {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
if i == pos {
|
||||
return Err(format!("Expected number at position {}", pos));
|
||||
}
|
||||
let s = std::str::from_utf8(&b[pos..i]).unwrap();
|
||||
let n: f64 = s.parse().map_err(|e| format!("Invalid number '{}': {}", s, e))?;
|
||||
Ok((JsonVal::Number(n), i))
|
||||
}
|
||||
|
||||
fn parse_bool(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
if b[pos..].starts_with(b"true") {
|
||||
Ok((JsonVal::Bool(true), pos + 4))
|
||||
} else if b[pos..].starts_with(b"false") {
|
||||
Ok((JsonVal::Bool(false), pos + 5))
|
||||
} else {
|
||||
Err(format!("Expected bool at position {}", pos))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_null(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
if b[pos..].starts_with(b"null") {
|
||||
Ok((JsonVal::Null, pos + 4))
|
||||
} else {
|
||||
Err(format!("Expected null at position {}", pos))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_array(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
let mut i = pos + 1; // skip '['
|
||||
let mut arr = Vec::new();
|
||||
i = skip_ws(b, i);
|
||||
if i < b.len() && b[i] == b']' {
|
||||
return Ok((JsonVal::Array(arr), i + 1));
|
||||
}
|
||||
loop {
|
||||
i = skip_ws(b, i);
|
||||
let (val, next) = parse_value(b, i)?;
|
||||
arr.push(val);
|
||||
i = skip_ws(b, next);
|
||||
if i >= b.len() {
|
||||
return Err("Unterminated array".into());
|
||||
}
|
||||
if b[i] == b']' {
|
||||
return Ok((JsonVal::Array(arr), i + 1));
|
||||
}
|
||||
if b[i] != b',' {
|
||||
return Err(format!("Expected ',' or ']' at position {}", i));
|
||||
}
|
||||
i += 1; // skip ','
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_object(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
|
||||
let mut i = pos + 1; // skip '{'
|
||||
let mut pairs = Vec::new();
|
||||
i = skip_ws(b, i);
|
||||
if i < b.len() && b[i] == b'}' {
|
||||
return Ok((JsonVal::Object(pairs), i + 1));
|
||||
}
|
||||
loop {
|
||||
i = skip_ws(b, i);
|
||||
let (key, next) = read_string(b, i)?;
|
||||
i = skip_ws(b, next);
|
||||
if i >= b.len() || b[i] != b':' {
|
||||
return Err(format!("Expected ':' at position {}", i));
|
||||
}
|
||||
i = skip_ws(b, i + 1);
|
||||
let (val, next) = parse_value(b, i)?;
|
||||
pairs.push((key, val));
|
||||
i = skip_ws(b, next);
|
||||
if i >= b.len() {
|
||||
return Err("Unterminated object".into());
|
||||
}
|
||||
if b[i] == b'}' {
|
||||
return Ok((JsonVal::Object(pairs), i + 1));
|
||||
}
|
||||
if b[i] != b',' {
|
||||
return Err(format!("Expected ',' or '}}' at position {}", i));
|
||||
}
|
||||
i += 1; // skip ','
|
||||
}
|
||||
}
|
||||
|
||||
// ── JsonVal -> String serialization ─────────────────────────────────
|
||||
|
||||
impl JsonVal {
|
||||
/// Serialize this JsonVal to a compact JSON string.
|
||||
pub fn to_json_string(&self) -> String {
|
||||
match self {
|
||||
JsonVal::Null => "null".to_string(),
|
||||
JsonVal::Bool(b) => if *b { "true" } else { "false" }.to_string(),
|
||||
JsonVal::Number(n) => {
|
||||
if n.fract() == 0.0 && n.abs() < 1e15 {
|
||||
format!("{}", *n as i64)
|
||||
} else {
|
||||
format!("{}", n)
|
||||
}
|
||||
}
|
||||
JsonVal::Str(s) => json_write_string(s),
|
||||
JsonVal::Array(arr) => {
|
||||
let mut out = String::from("[");
|
||||
for (i, v) in arr.iter().enumerate() {
|
||||
if i > 0 {
|
||||
out.push(',');
|
||||
}
|
||||
out.push_str(&v.to_json_string());
|
||||
}
|
||||
out.push(']');
|
||||
out
|
||||
}
|
||||
JsonVal::Object(pairs) => {
|
||||
let mut out = String::from("{");
|
||||
for (i, (k, v)) in pairs.iter().enumerate() {
|
||||
if i > 0 {
|
||||
out.push(',');
|
||||
}
|
||||
out.push_str(&json_write_string(k));
|
||||
out.push(':');
|
||||
out.push_str(&v.to_json_string());
|
||||
}
|
||||
out.push('}');
|
||||
out
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // ── Writer tests ────────────────────────────────────────────────

    #[test]
    fn test_write_null() {
        assert_eq!(json_write_null(), "null");
    }

    // Integral f32s drop the decimal point; fractional ones keep it.
    #[test]
    fn test_write_number() {
        assert_eq!(json_write_f32(3.14), "3.14");
        assert_eq!(json_write_f32(1.0), "1");
    }

    // Quotes are escaped; plain text is passed through.
    #[test]
    fn test_write_string() {
        assert_eq!(json_write_string("hello"), "\"hello\"");
        assert_eq!(json_write_string("a\"b"), "\"a\\\"b\"");
    }

    #[test]
    fn test_write_array() {
        assert_eq!(json_write_array(&["1", "2", "3"]), "[1,2,3]");
    }

    // Keys are quoted by the writer; values are emitted verbatim.
    #[test]
    fn test_write_object() {
        let pairs = vec![("name", "\"test\""), ("value", "42")];
        let result = json_write_object(&pairs);
        assert_eq!(result, r#"{"name":"test","value":42}"#);
    }

    // ── Parser tests ────────────────────────────────────────────────

    #[test]
    fn test_parse_number() {
        match json_parse("42.5").unwrap() {
            JsonVal::Number(n) => assert!((n - 42.5).abs() < 1e-10),
            _ => panic!("expected number"),
        }
    }

    #[test]
    fn test_parse_negative_number() {
        match json_parse("-3.14").unwrap() {
            JsonVal::Number(n) => assert!((n - (-3.14)).abs() < 1e-10),
            _ => panic!("expected number"),
        }
    }

    #[test]
    fn test_parse_string() {
        assert_eq!(json_parse("\"hello\"").unwrap(), JsonVal::Str("hello".into()));
    }

    // Escaped quote and backslash are resolved during parsing.
    #[test]
    fn test_parse_string_with_escapes() {
        assert_eq!(
            json_parse(r#""a\"b\\c""#).unwrap(),
            JsonVal::Str("a\"b\\c".into())
        );
    }

    #[test]
    fn test_parse_array() {
        match json_parse("[1,2,3]").unwrap() {
            JsonVal::Array(a) => assert_eq!(a.len(), 3),
            _ => panic!("expected array"),
        }
    }

    #[test]
    fn test_parse_empty_array() {
        match json_parse("[]").unwrap() {
            JsonVal::Array(a) => assert_eq!(a.len(), 0),
            _ => panic!("expected array"),
        }
    }

    #[test]
    fn test_parse_object() {
        let val = json_parse(r#"{"x":1,"y":2}"#).unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
        assert_eq!(val.get("x").unwrap().as_f64().unwrap(), 1.0);
        assert_eq!(val.get("y").unwrap().as_f64().unwrap(), 2.0);
    }

    #[test]
    fn test_parse_null() {
        assert_eq!(json_parse("null").unwrap(), JsonVal::Null);
    }

    #[test]
    fn test_parse_bool() {
        assert_eq!(json_parse("true").unwrap(), JsonVal::Bool(true));
        assert_eq!(json_parse("false").unwrap(), JsonVal::Bool(false));
    }

    // Arrays and objects nest; accessors drill into both.
    #[test]
    fn test_parse_nested() {
        let val = json_parse(r#"{"a":[1,2],"b":{"c":3}}"#).unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
        let arr = val.get("a").unwrap().as_array().unwrap();
        assert_eq!(arr.len(), 2);
        let inner = val.get("b").unwrap();
        assert_eq!(inner.get("c").unwrap().as_f64().unwrap(), 3.0);
    }

    // Whitespace is tolerated everywhere between tokens.
    #[test]
    fn test_parse_whitespace() {
        let val = json_parse(" { \"a\" : 1 , \"b\" : [ 2 , 3 ] } ").unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
    }

    // Serializing and re-parsing a value tree yields an equal tree.
    #[test]
    fn test_json_val_to_json_string_roundtrip() {
        let val = JsonVal::Object(vec![
            ("name".into(), JsonVal::Str("test".into())),
            ("count".into(), JsonVal::Number(42.0)),
            ("items".into(), JsonVal::Array(vec![
                JsonVal::Number(1.0),
                JsonVal::Null,
                JsonVal::Bool(true),
            ])),
        ]);
        let s = val.to_json_string();
        let parsed = json_parse(&s).unwrap();
        assert_eq!(parsed, val);
    }
}
|
||||
@@ -5,6 +5,10 @@ pub mod transform;
|
||||
pub mod hierarchy;
|
||||
pub mod world_transform;
|
||||
pub mod scene;
|
||||
pub mod scheduler;
|
||||
pub mod json;
|
||||
pub mod component_registry;
|
||||
pub mod binary_scene;
|
||||
|
||||
pub use entity::{Entity, EntityAllocator};
|
||||
pub use sparse_set::SparseSet;
|
||||
@@ -12,4 +16,7 @@ pub use world::World;
|
||||
pub use transform::Transform;
|
||||
pub use hierarchy::{Parent, Children, add_child, remove_child, despawn_recursive, roots};
|
||||
pub use world_transform::{WorldTransform, propagate_transforms};
|
||||
pub use scene::{Tag, serialize_scene, deserialize_scene};
|
||||
pub use scene::{Tag, serialize_scene, deserialize_scene, serialize_scene_json, deserialize_scene_json};
|
||||
pub use scheduler::{Scheduler, System};
|
||||
pub use component_registry::ComponentRegistry;
|
||||
pub use binary_scene::{serialize_scene_binary, deserialize_scene_binary};
|
||||
|
||||
@@ -4,6 +4,8 @@ use crate::entity::Entity;
|
||||
use crate::world::World;
|
||||
use crate::transform::Transform;
|
||||
use crate::hierarchy::{add_child, Parent};
|
||||
use crate::component_registry::ComponentRegistry;
|
||||
use crate::json::{self, JsonVal};
|
||||
|
||||
/// String tag for entity identification.
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -152,10 +154,160 @@ pub fn deserialize_scene(world: &mut World, source: &str) -> Vec<Entity> {
|
||||
created
|
||||
}
|
||||
|
||||
// ── Hex encoding helpers ────────────────────────────────────────────
|
||||
|
||||
/// Encode bytes as a lowercase hex string, two digits per byte.
///
/// Improvement: the previous version ran `format!("{:02x}", b)` per byte,
/// allocating a temporary String and invoking the formatting machinery for
/// every element; a nibble lookup table does the same work allocation-free.
fn bytes_to_hex(data: &[u8]) -> String {
    const DIGITS: &[u8; 16] = b"0123456789abcdef";
    let mut s = String::with_capacity(data.len() * 2);
    for &byte in data {
        s.push(DIGITS[(byte >> 4) as usize] as char);
        s.push(DIGITS[(byte & 0x0f) as usize] as char);
    }
    s
}
|
||||
|
||||
fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
|
||||
if hex.len() % 2 != 0 {
|
||||
return Err("Hex string has odd length".into());
|
||||
}
|
||||
let mut bytes = Vec::with_capacity(hex.len() / 2);
|
||||
let mut i = 0;
|
||||
let chars: Vec<u8> = hex.bytes().collect();
|
||||
while i < chars.len() {
|
||||
let hi = hex_digit(chars[i])?;
|
||||
let lo = hex_digit(chars[i + 1])?;
|
||||
bytes.push((hi << 4) | lo);
|
||||
i += 2;
|
||||
}
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
/// Map one ASCII hex digit (either case) to its 0..=15 value.
fn hex_digit(c: u8) -> Result<u8, String> {
    match c {
        b'0'..=b'9' => Ok(c - b'0'),
        b'a'..=b'f' => Ok(10 + c - b'a'),
        b'A'..=b'F' => Ok(10 + c - b'A'),
        other => Err(format!("Invalid hex digit: {}", other as char)),
    }
}
|
||||
|
||||
// ── JSON scene serialization ────────────────────────────────────────
|
||||
|
||||
/// Serialize all entities with a Transform to JSON format using the component registry.
|
||||
/// Format: {"version":1,"entities":[{"parent":null_or_idx,"components":{"name":"hex",...}}]}
|
||||
pub fn serialize_scene_json(world: &World, registry: &ComponentRegistry) -> String {
|
||||
let entities_with_transform: Vec<(Entity, Transform)> = world
|
||||
.query::<Transform>()
|
||||
.map(|(e, t)| (e, *t))
|
||||
.collect();
|
||||
|
||||
let entity_to_index: HashMap<Entity, usize> = entities_with_transform
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, (e, _))| (*e, i))
|
||||
.collect();
|
||||
|
||||
// Build entity JSON values
|
||||
let mut entity_vals = Vec::new();
|
||||
for (entity, _) in &entities_with_transform {
|
||||
// Parent index
|
||||
let parent_val = if let Some(parent_comp) = world.get::<Parent>(*entity) {
|
||||
if let Some(&idx) = entity_to_index.get(&parent_comp.0) {
|
||||
JsonVal::Number(idx as f64)
|
||||
} else {
|
||||
JsonVal::Null
|
||||
}
|
||||
} else {
|
||||
JsonVal::Null
|
||||
};
|
||||
|
||||
// Components
|
||||
let mut comp_pairs = Vec::new();
|
||||
for entry in registry.entries() {
|
||||
if let Some(data) = (entry.serialize)(world, *entity) {
|
||||
comp_pairs.push((entry.name.clone(), JsonVal::Str(bytes_to_hex(&data))));
|
||||
}
|
||||
}
|
||||
|
||||
entity_vals.push(JsonVal::Object(vec![
|
||||
("parent".into(), parent_val),
|
||||
("components".into(), JsonVal::Object(comp_pairs)),
|
||||
]));
|
||||
}
|
||||
|
||||
let root = JsonVal::Object(vec![
|
||||
("version".into(), JsonVal::Number(1.0)),
|
||||
("entities".into(), JsonVal::Array(entity_vals)),
|
||||
]);
|
||||
|
||||
root.to_json_string()
|
||||
}
|
||||
|
||||
/// Deserialize entities from a JSON scene string.
|
||||
pub fn deserialize_scene_json(
|
||||
world: &mut World,
|
||||
json_str: &str,
|
||||
registry: &ComponentRegistry,
|
||||
) -> Result<Vec<Entity>, String> {
|
||||
let root = json::json_parse(json_str)?;
|
||||
|
||||
let version = root.get("version")
|
||||
.and_then(|v| v.as_f64())
|
||||
.ok_or("Missing or invalid 'version'")?;
|
||||
if version as u32 != 1 {
|
||||
return Err(format!("Unsupported version: {}", version));
|
||||
}
|
||||
|
||||
let entities_arr = root.get("entities")
|
||||
.and_then(|v| v.as_array())
|
||||
.ok_or("Missing or invalid 'entities'")?;
|
||||
|
||||
// First pass: create entities and deserialize components
|
||||
let mut created: Vec<Entity> = Vec::with_capacity(entities_arr.len());
|
||||
let mut parent_indices: Vec<Option<usize>> = Vec::with_capacity(entities_arr.len());
|
||||
|
||||
for entity_val in entities_arr {
|
||||
let entity = world.spawn();
|
||||
|
||||
// Parse parent index
|
||||
let parent_idx = match entity_val.get("parent") {
|
||||
Some(JsonVal::Number(n)) => Some(*n as usize),
|
||||
_ => None,
|
||||
};
|
||||
parent_indices.push(parent_idx);
|
||||
|
||||
// Deserialize components
|
||||
if let Some(comps) = entity_val.get("components").and_then(|v| v.as_object()) {
|
||||
for (name, hex_val) in comps {
|
||||
if let Some(hex_str) = hex_val.as_str() {
|
||||
let data = hex_to_bytes(hex_str)?;
|
||||
if let Some(entry) = registry.find(name) {
|
||||
(entry.deserialize)(world, entity, &data)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
created.push(entity);
|
||||
}
|
||||
|
||||
// Second pass: apply parent relationships
|
||||
for (child_idx, parent_idx_opt) in parent_indices.iter().enumerate() {
|
||||
if let Some(parent_idx) = parent_idx_opt {
|
||||
if *parent_idx < created.len() {
|
||||
let child_entity = created[child_idx];
|
||||
let parent_entity = created[*parent_idx];
|
||||
add_child(world, parent_entity, child_entity);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(created)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::hierarchy::{add_child, roots, Parent};
|
||||
use crate::component_registry::ComponentRegistry;
|
||||
use voltex_math::Vec3;
|
||||
|
||||
#[test]
|
||||
@@ -270,4 +422,75 @@ entity 2
|
||||
let scene_roots = roots(&world);
|
||||
assert_eq!(scene_roots.len(), 2, "should have exactly 2 root entities");
|
||||
}
|
||||
|
||||
    // ── JSON scene tests ────────────────────────────────────────────

    // Hex encoding is lossless across the byte range (0, boundary nibbles, 255).
    #[test]
    fn test_hex_roundtrip() {
        let data = vec![0u8, 1, 15, 16, 255];
        let hex = bytes_to_hex(&data);
        assert_eq!(hex, "00010f10ff");
        let back = hex_to_bytes(&hex).unwrap();
        assert_eq!(back, data);
    }

    // Transform + Tag survive a serialize -> deserialize round trip.
    #[test]
    fn test_json_roundtrip() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
        world.add(e, Tag("player".into()));

        let json = serialize_scene_json(&world, &registry);
        assert!(json.contains("\"version\":1"));

        let mut world2 = World::new();
        let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
        assert_eq!(entities.len(), 1);
        let t = world2.get::<Transform>(entities[0]).unwrap();
        assert!((t.position.x - 1.0).abs() < 1e-4);
        assert!((t.position.y - 2.0).abs() < 1e-4);
        assert!((t.position.z - 3.0).abs() < 1e-4);
        let tag = world2.get::<Tag>(entities[0]).unwrap();
        assert_eq!(tag.0, "player");
    }

    // Parent links are rebuilt as indices into the serialized entity array.
    #[test]
    fn test_json_with_parent() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let parent = world.spawn();
        let child = world.spawn();
        world.add(parent, Transform::new());
        world.add(child, Transform::new());
        add_child(&mut world, parent, child);

        let json = serialize_scene_json(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
        assert_eq!(entities.len(), 2);
        assert!(world2.get::<Parent>(entities[1]).is_some());
        let parent_comp = world2.get::<Parent>(entities[1]).unwrap();
        assert_eq!(parent_comp.0, entities[0]);
    }

    // All serialized entities come back; count is preserved.
    #[test]
    fn test_json_multiple_entities() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        for i in 0..5 {
            let e = world.spawn();
            world.add(e, Transform::from_position(Vec3::new(i as f32, 0.0, 0.0)));
            world.add(e, Tag(format!("entity_{}", i)));
        }

        let json = serialize_scene_json(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
        assert_eq!(entities.len(), 5);
    }
|
||||
}
|
||||
|
||||
121
crates/voltex_ecs/src/scheduler.rs
Normal file
121
crates/voltex_ecs/src/scheduler.rs
Normal file
@@ -0,0 +1,121 @@
|
||||
use crate::World;
|
||||
|
||||
/// A system that can be run on the world.
pub trait System {
    /// Execute one step of this system with full mutable access to `world`.
    fn run(&mut self, world: &mut World);
}
|
||||
|
||||
/// Blanket impl: any FnMut(&mut World) is a System.
|
||||
impl<F: FnMut(&mut World)> System for F {
|
||||
fn run(&mut self, world: &mut World) {
|
||||
(self)(world);
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs registered systems in order.
|
||||
pub struct Scheduler {
|
||||
systems: Vec<Box<dyn System>>,
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub fn new() -> Self {
|
||||
Self { systems: Vec::new() }
|
||||
}
|
||||
|
||||
/// Add a system. Systems run in the order they are added.
|
||||
pub fn add<S: System + 'static>(&mut self, system: S) -> &mut Self {
|
||||
self.systems.push(Box::new(system));
|
||||
self
|
||||
}
|
||||
|
||||
/// Run all systems in registration order.
|
||||
pub fn run_all(&mut self, world: &mut World) {
|
||||
for system in &mut self.systems {
|
||||
system.run(world);
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of registered systems.
|
||||
pub fn len(&self) -> usize {
|
||||
self.systems.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.systems.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Scheduler {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::World;

    // Minimal component used to observe system side effects.
    #[derive(Debug, PartialEq)]
    struct Counter(u32);

    // (+1) then (*10) yields 10 only if systems run in registration order.
    #[test]
    fn test_scheduler_runs_in_order() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Counter(0));

        let mut scheduler = Scheduler::new();
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 += 1; // 0 -> 1
        });
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 *= 10; // 1 -> 10
        });

        scheduler.run_all(&mut world);

        let c = world.get::<Counter>(e).unwrap();
        assert_eq!(c.0, 10); // proves order: add first, then multiply
    }

    // Running with no systems is a harmless no-op.
    #[test]
    fn test_scheduler_empty() {
        let mut world = World::new();
        let mut scheduler = Scheduler::new();
        scheduler.run_all(&mut world); // should not panic
    }

    // run_all is repeatable: each call applies every system exactly once.
    #[test]
    fn test_scheduler_multiple_runs() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Counter(0));

        let mut scheduler = Scheduler::new();
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 += 1;
        });

        scheduler.run_all(&mut world);
        scheduler.run_all(&mut world);
        scheduler.run_all(&mut world);

        assert_eq!(world.get::<Counter>(e).unwrap().0, 3);
    }

    // add() returns &mut Self, enabling fluent chaining.
    #[test]
    fn test_scheduler_add_chaining() {
        let mut scheduler = Scheduler::new();
        scheduler
            .add(|_: &mut World| {})
            .add(|_: &mut World| {});
        assert_eq!(scheduler.len(), 2);
    }
}
|
||||
@@ -227,6 +227,54 @@ impl World {
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
pub fn has_component<T: 'static>(&self, entity: Entity) -> bool {
|
||||
self.storage::<T>().map_or(false, |s| s.contains(entity))
|
||||
}
|
||||
|
||||
/// Query entities that have component T AND also have component W.
|
||||
pub fn query_with<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
|
||||
let t_storage = match self.storage::<T>() {
|
||||
Some(s) => s,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
let mut result = Vec::new();
|
||||
for (entity, data) in t_storage.iter() {
|
||||
if self.has_component::<W>(entity) {
|
||||
result.push((entity, data));
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Query entities that have component T but NOT component W.
|
||||
pub fn query_without<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
|
||||
let t_storage = match self.storage::<T>() {
|
||||
Some(s) => s,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
let mut result = Vec::new();
|
||||
for (entity, data) in t_storage.iter() {
|
||||
if !self.has_component::<W>(entity) {
|
||||
result.push((entity, data));
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Query entities with components A and B, that also have component W.
|
||||
pub fn query2_with<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
|
||||
self.query2::<A, B>().into_iter()
|
||||
.filter(|(e, _, _)| self.has_component::<W>(*e))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Query entities with components A and B, that do NOT have component W.
|
||||
pub fn query2_without<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
|
||||
self.query2::<A, B>().into_iter()
|
||||
.filter(|(e, _, _)| !self.has_component::<W>(*e))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for World {
|
||||
@@ -388,6 +436,96 @@ mod tests {
|
||||
assert_eq!(results[0].0, e0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_has_component() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Position { x: 1.0, y: 2.0 });
|
||||
assert!(world.has_component::<Position>(e));
|
||||
assert!(!world.has_component::<Velocity>(e));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query_with() {
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
let e1 = world.spawn();
|
||||
let e2 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
// e1 has Position but no Velocity
|
||||
world.add(e2, Position { x: 3.0, y: 0.0 });
|
||||
world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
|
||||
|
||||
let results = world.query_with::<Position, Velocity>();
|
||||
assert_eq!(results.len(), 2);
|
||||
let entities: Vec<Entity> = results.iter().map(|(e, _)| *e).collect();
|
||||
assert!(entities.contains(&e0));
|
||||
assert!(entities.contains(&e2));
|
||||
assert!(!entities.contains(&e1));
|
||||
}
|
||||
|
||||
    #[test]
    fn test_query_without() {
        // Three entities: e0 and e2 carry Position+Velocity, e1 carries Position only.
        let mut world = World::new();
        let e0 = world.spawn();
        let e1 = world.spawn();
        let e2 = world.spawn();
        world.add(e0, Position { x: 1.0, y: 0.0 });
        world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
        world.add(e1, Position { x: 2.0, y: 0.0 });
        // e1 has Position but no Velocity — should be included
        world.add(e2, Position { x: 3.0, y: 0.0 });
        world.add(e2, Velocity { dx: 3.0, dy: 0.0 });

        // query_without::<Position, Velocity> must return exactly the one
        // entity that has Position but lacks Velocity.
        let results = world.query_without::<Position, Velocity>();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].0, e1);
    }
|
||||
|
||||
#[test]
|
||||
fn test_query2_with() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Health(i32);
|
||||
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e0, Health(100));
|
||||
|
||||
let e1 = world.spawn();
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
|
||||
// e1 has no Health
|
||||
|
||||
let results = world.query2_with::<Position, Velocity, Health>();
|
||||
assert_eq!(results.len(), 1);
|
||||
assert_eq!(results[0].0, e0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query2_without() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Health(i32);
|
||||
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e0, Health(100));
|
||||
|
||||
let e1 = world.spawn();
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
|
||||
// e1 has no Health
|
||||
|
||||
let results = world.query2_without::<Position, Velocity, Health>();
|
||||
assert_eq!(results.len(), 1);
|
||||
assert_eq!(results[0].0, e1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_entity_count() {
|
||||
let mut world = World::new();
|
||||
|
||||
@@ -9,15 +9,27 @@ pub struct DrawVertex {
|
||||
pub color: [u8; 4],
|
||||
}
|
||||
|
||||
/// A scissor rectangle for content clipping, in pixel coordinates.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct ScissorRect {
|
||||
pub x: u32,
|
||||
pub y: u32,
|
||||
pub w: u32,
|
||||
pub h: u32,
|
||||
}
|
||||
|
||||
pub struct DrawCommand {
|
||||
pub index_offset: u32,
|
||||
pub index_count: u32,
|
||||
/// Optional scissor rect for clipping. None means no clipping.
|
||||
pub scissor: Option<ScissorRect>,
|
||||
}
|
||||
|
||||
pub struct DrawList {
|
||||
pub vertices: Vec<DrawVertex>,
|
||||
pub indices: Vec<u16>,
|
||||
pub commands: Vec<DrawCommand>,
|
||||
scissor_stack: Vec<ScissorRect>,
|
||||
}
|
||||
|
||||
impl DrawList {
|
||||
@@ -26,6 +38,7 @@ impl DrawList {
|
||||
vertices: Vec::new(),
|
||||
indices: Vec::new(),
|
||||
commands: Vec::new(),
|
||||
scissor_stack: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +46,23 @@ impl DrawList {
|
||||
self.vertices.clear();
|
||||
self.indices.clear();
|
||||
self.commands.clear();
|
||||
self.scissor_stack.clear();
|
||||
}
|
||||
|
||||
/// Push a scissor rect onto the stack. All subsequent draw commands will
|
||||
/// be clipped to this rectangle until `pop_scissor` is called.
|
||||
pub fn push_scissor(&mut self, x: u32, y: u32, w: u32, h: u32) {
|
||||
self.scissor_stack.push(ScissorRect { x, y, w, h });
|
||||
}
|
||||
|
||||
/// Pop the current scissor rect from the stack.
|
||||
pub fn pop_scissor(&mut self) {
|
||||
self.scissor_stack.pop();
|
||||
}
|
||||
|
||||
/// Returns the current scissor rect (top of stack), or None.
|
||||
fn current_scissor(&self) -> Option<ScissorRect> {
|
||||
self.scissor_stack.last().copied()
|
||||
}
|
||||
|
||||
/// Add a solid-color rectangle. UV is (0,0) for solid color rendering.
|
||||
@@ -67,6 +97,7 @@ impl DrawList {
|
||||
self.commands.push(DrawCommand {
|
||||
index_offset,
|
||||
index_count: 6,
|
||||
scissor: self.current_scissor(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -286,8 +286,13 @@ impl UiRenderer {
|
||||
pass.set_vertex_buffer(0, vertex_buffer.slice(..));
|
||||
pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint16);
|
||||
|
||||
// Draw each command
|
||||
// Draw each command (with optional scissor clipping)
|
||||
for cmd in &draw_list.commands {
|
||||
if let Some(scissor) = &cmd.scissor {
|
||||
pass.set_scissor_rect(scissor.x, scissor.y, scissor.w, scissor.h);
|
||||
} else {
|
||||
pass.set_scissor_rect(0, 0, screen_w as u32, screen_h as u32);
|
||||
}
|
||||
pass.draw_indexed(
|
||||
cmd.index_offset..cmd.index_offset + cmd.index_count,
|
||||
0,
|
||||
|
||||
@@ -1,7 +1,19 @@
|
||||
use std::collections::HashMap;
|
||||
use crate::draw_list::DrawList;
|
||||
use crate::font::FontAtlas;
|
||||
use crate::layout::LayoutState;
|
||||
|
||||
/// Key events the UI system understands.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Key {
|
||||
Left,
|
||||
Right,
|
||||
Backspace,
|
||||
Delete,
|
||||
Home,
|
||||
End,
|
||||
}
|
||||
|
||||
pub struct UiContext {
|
||||
pub hot: Option<u64>,
|
||||
pub active: Option<u64>,
|
||||
@@ -12,11 +24,23 @@ pub struct UiContext {
|
||||
pub mouse_down: bool,
|
||||
pub mouse_clicked: bool,
|
||||
pub mouse_released: bool,
|
||||
pub mouse_scroll: f32,
|
||||
pub screen_width: f32,
|
||||
pub screen_height: f32,
|
||||
pub font: FontAtlas,
|
||||
id_counter: u64,
|
||||
prev_mouse_down: bool,
|
||||
// Text input state
|
||||
pub focused_id: Option<u32>,
|
||||
pub cursor_pos: usize,
|
||||
input_chars: Vec<char>,
|
||||
input_keys: Vec<Key>,
|
||||
// Scroll panel state
|
||||
pub scroll_offsets: HashMap<u32, f32>,
|
||||
// Drag and drop state
|
||||
pub dragging: Option<(u32, u64)>,
|
||||
pub drag_start: (f32, f32),
|
||||
pub(crate) drag_started: bool,
|
||||
}
|
||||
|
||||
impl UiContext {
|
||||
@@ -32,11 +56,20 @@ impl UiContext {
|
||||
mouse_down: false,
|
||||
mouse_clicked: false,
|
||||
mouse_released: false,
|
||||
mouse_scroll: 0.0,
|
||||
screen_width: screen_w,
|
||||
screen_height: screen_h,
|
||||
font: FontAtlas::generate(),
|
||||
id_counter: 0,
|
||||
prev_mouse_down: false,
|
||||
focused_id: None,
|
||||
cursor_pos: 0,
|
||||
input_chars: Vec::new(),
|
||||
input_keys: Vec::new(),
|
||||
scroll_offsets: HashMap::new(),
|
||||
dragging: None,
|
||||
drag_start: (0.0, 0.0),
|
||||
drag_started: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,9 +93,39 @@ impl UiContext {
|
||||
self.layout = LayoutState::new(0.0, 0.0);
|
||||
}
|
||||
|
||||
/// Feed a character input event (printable ASCII) for text input widgets.
|
||||
pub fn input_char(&mut self, ch: char) {
|
||||
if ch.is_ascii() && !ch.is_ascii_control() {
|
||||
self.input_chars.push(ch);
|
||||
}
|
||||
}
|
||||
|
||||
/// Feed a key input event for text input widgets.
|
||||
pub fn input_key(&mut self, key: Key) {
|
||||
self.input_keys.push(key);
|
||||
}
|
||||
|
||||
/// Set mouse scroll delta for this frame (positive = scroll up).
|
||||
pub fn set_scroll(&mut self, delta: f32) {
|
||||
self.mouse_scroll = delta;
|
||||
}
|
||||
|
||||
/// Drain all pending input chars (consumed by text_input widget).
|
||||
pub(crate) fn drain_chars(&mut self) -> Vec<char> {
|
||||
std::mem::take(&mut self.input_chars)
|
||||
}
|
||||
|
||||
/// Drain all pending key events (consumed by text_input widget).
|
||||
pub(crate) fn drain_keys(&mut self) -> Vec<Key> {
|
||||
std::mem::take(&mut self.input_keys)
|
||||
}
|
||||
|
||||
/// End the current frame.
|
||||
pub fn end_frame(&mut self) {
|
||||
// Nothing for now — GPU submission will hook in here later.
|
||||
self.mouse_scroll = 0.0;
|
||||
// Clear any unconsumed input
|
||||
self.input_chars.clear();
|
||||
self.input_keys.clear();
|
||||
}
|
||||
|
||||
/// Generate a new unique ID for this frame.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::ui_context::UiContext;
|
||||
use crate::ui_context::{Key, UiContext};
|
||||
|
||||
// Color palette
|
||||
const COLOR_BG: [u8; 4] = [0x2B, 0x2B, 0x2B, 0xFF];
|
||||
@@ -11,6 +11,13 @@ const COLOR_SLIDER_BG: [u8; 4] = [0x44, 0x44, 0x44, 0xFF];
|
||||
const COLOR_SLIDER_HANDLE: [u8; 4] = [0x88, 0x88, 0xFF, 0xFF];
|
||||
const COLOR_CHECK_BG: [u8; 4] = [0x44, 0x44, 0x44, 0xFF];
|
||||
const COLOR_CHECK_MARK: [u8; 4] = [0x88, 0xFF, 0x88, 0xFF];
|
||||
const COLOR_INPUT_BG: [u8; 4] = [0x22, 0x22, 0x22, 0xFF];
|
||||
const COLOR_INPUT_BORDER: [u8; 4] = [0x66, 0x66, 0x66, 0xFF];
|
||||
const COLOR_INPUT_FOCUSED: [u8; 4] = [0x44, 0x88, 0xFF, 0xFF];
|
||||
const COLOR_CURSOR: [u8; 4] = [0xFF, 0xFF, 0xFF, 0xFF];
|
||||
const COLOR_SCROLLBAR_BG: [u8; 4] = [0x33, 0x33, 0x33, 0xFF];
|
||||
const COLOR_SCROLLBAR_THUMB: [u8; 4]= [0x66, 0x66, 0x77, 0xFF];
|
||||
const DRAG_THRESHOLD: f32 = 5.0;
|
||||
|
||||
impl UiContext {
|
||||
/// Draw text at the current cursor position and advance to the next line.
|
||||
@@ -233,11 +240,231 @@ impl UiContext {
|
||||
pub fn end_panel(&mut self) {
|
||||
// Nothing for now; future could restore outer cursor state.
|
||||
}
|
||||
|
||||
    // ── Text Input Widget ─────────────────────────────────────────────

    /// Draw an editable single-line text input. Returns true if the buffer changed.
    ///
    /// `id` must be unique per text input. The widget renders a box at (x, y) with
    /// the given `width`. Height is determined by the font glyph height + padding.
    ///
    /// Focus model: clicking inside the box focuses this input and places the
    /// cursor at the clicked character column; clicking elsewhere while this
    /// input is focused unfocuses it. Character/key events (fed via
    /// `input_char` / `input_key`) are consumed only while focused.
    ///
    /// NOTE(review): cursor_pos and `buffer.insert`/`remove` use byte indices;
    /// this is only safe because `input_char` restricts input to ASCII —
    /// confirm before widening the accepted character set.
    pub fn text_input(&mut self, id: u32, buffer: &mut String, x: f32, y: f32, width: f32) -> bool {
        // Fixed-width font metrics: every glyph occupies gw x gh pixels.
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        let height = gh + padding * 2.0;

        let hovered = self.mouse_in_rect(x, y, width, height);

        // Click to focus / unfocus
        if self.mouse_clicked {
            if hovered {
                self.focused_id = Some(id);
                // Place cursor at end or at click position
                // (negative offsets saturate to 0 via the `as usize` cast;
                // clicks past the text clamp to buffer.len()).
                let click_offset = ((self.mouse_x - x - padding) / gw).round() as usize;
                self.cursor_pos = click_offset.min(buffer.len());
            } else if self.focused_id == Some(id) {
                self.focused_id = None;
            }
        }

        let mut changed = false;

        // Process input only if focused
        if self.focused_id == Some(id) {
            // Ensure cursor_pos is valid — another input (or external edits to
            // `buffer`) may have left it past the end.
            if self.cursor_pos > buffer.len() {
                self.cursor_pos = buffer.len();
            }

            // Process character input: insert each pending char at the cursor.
            let chars = self.drain_chars();
            for ch in chars {
                buffer.insert(self.cursor_pos, ch);
                self.cursor_pos += 1;
                changed = true;
            }

            // Process key input: editing and cursor-movement keys.
            let keys = self.drain_keys();
            for key in keys {
                match key {
                    Key::Backspace => {
                        // Delete the character before the cursor.
                        if self.cursor_pos > 0 {
                            buffer.remove(self.cursor_pos - 1);
                            self.cursor_pos -= 1;
                            changed = true;
                        }
                    }
                    Key::Delete => {
                        // Delete the character under/after the cursor.
                        if self.cursor_pos < buffer.len() {
                            buffer.remove(self.cursor_pos);
                            changed = true;
                        }
                    }
                    Key::Left => {
                        if self.cursor_pos > 0 {
                            self.cursor_pos -= 1;
                        }
                    }
                    Key::Right => {
                        if self.cursor_pos < buffer.len() {
                            self.cursor_pos += 1;
                        }
                    }
                    Key::Home => {
                        self.cursor_pos = 0;
                    }
                    Key::End => {
                        self.cursor_pos = buffer.len();
                    }
                }
            }
        }

        // Draw border — highlighted while focused.
        let border_color = if self.focused_id == Some(id) {
            COLOR_INPUT_FOCUSED
        } else {
            COLOR_INPUT_BORDER
        };
        self.draw_list.add_rect(x, y, width, height, border_color);
        // Draw inner background (1px border)
        self.draw_list.add_rect(x + 1.0, y + 1.0, width - 2.0, height - 2.0, COLOR_INPUT_BG);

        // Draw text, one fixed-width glyph per character.
        let text_x = x + padding;
        let text_y = y + padding;
        let mut cx = text_x;
        for ch in buffer.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }

        // Draw cursor if focused — a 1px vertical bar at the cursor column.
        if self.focused_id == Some(id) {
            let cursor_x = text_x + self.cursor_pos as f32 * gw;
            self.draw_list.add_rect(cursor_x, text_y, 1.0, gh, COLOR_CURSOR);
        }

        self.layout.advance_line();
        changed
    }
|
||||
|
||||
    // ── Scroll Panel ──────────────────────────────────────────────────

    /// Begin a scrollable panel. Content drawn between begin/end will be clipped
    /// to the panel bounds. `content_height` is the total height of the content
    /// inside the panel (used to compute scrollbar size).
    ///
    /// Side effects: updates `scroll_offsets[id]` from this frame's wheel input,
    /// emits the panel background/scrollbar draw commands, pushes a scissor
    /// rect (popped by `end_scroll_panel`), and resets `self.layout` so
    /// subsequent widgets lay out inside the panel, shifted up by the scroll.
    pub fn begin_scroll_panel(&mut self, id: u32, x: f32, y: f32, w: f32, h: f32, content_height: f32) {
        // A fixed-width scrollbar on the right; content gets the remainder.
        let scrollbar_w = 12.0_f32;
        let panel_inner_w = w - scrollbar_w;

        // Handle mouse wheel when hovering over the panel.
        // Positive mouse_scroll means "scroll up" (see set_scroll), which
        // decreases the offset; 20px per wheel unit.
        let hovered = self.mouse_in_rect(x, y, w, h);
        let scroll_delta = if hovered && self.mouse_scroll.abs() > 0.0 {
            -self.mouse_scroll * 20.0
        } else {
            0.0
        };

        // Get or create scroll offset, apply delta and clamp to [0, max_scroll].
        let scroll = self.scroll_offsets.entry(id).or_insert(0.0);
        *scroll += scroll_delta;
        let max_scroll = (content_height - h).max(0.0);
        *scroll = scroll.clamp(0.0, max_scroll);
        let current_scroll = *scroll;

        // Draw panel background
        self.draw_list.add_rect(x, y, w, h, COLOR_PANEL);

        // Draw scrollbar track
        let sb_x = x + panel_inner_w;
        self.draw_list.add_rect(sb_x, y, scrollbar_w, h, COLOR_SCROLLBAR_BG);

        // Draw scrollbar thumb — only when the content actually overflows.
        // Thumb height is proportional to the visible fraction (min 16px);
        // thumb position tracks the scroll fraction.
        if content_height > h {
            let thumb_ratio = h / content_height;
            let thumb_h = (thumb_ratio * h).max(16.0);
            let scroll_ratio = if max_scroll > 0.0 { current_scroll / max_scroll } else { 0.0 };
            let thumb_y = y + scroll_ratio * (h - thumb_h);
            self.draw_list.add_rect(sb_x, thumb_y, scrollbar_w, thumb_h, COLOR_SCROLLBAR_THUMB);
        }

        // Push scissor rect for content clipping (content area only — the
        // scrollbar stays outside the clip).
        self.draw_list.push_scissor(x as u32, y as u32, panel_inner_w as u32, h as u32);

        // Set cursor inside panel, offset upward by the current scroll so the
        // visible slice of the content lines up with the panel top.
        self.layout = crate::layout::LayoutState::new(x + self.layout.padding, y + self.layout.padding - current_scroll);
    }
|
||||
|
||||
    /// End a scrollable panel. Pops the scissor rect.
    ///
    /// Must be paired with a preceding `begin_scroll_panel`; commands emitted
    /// after this call are no longer clipped to the panel.
    pub fn end_scroll_panel(&mut self) {
        self.draw_list.pop_scissor();
    }
|
||||
|
||||
// ── Drag and Drop ─────────────────────────────────────────────────
|
||||
|
||||
/// Begin dragging an item. Call this when the user presses down on a draggable element.
|
||||
/// `id` identifies the source, `payload` is an arbitrary u64 value transferred on drop.
|
||||
pub fn begin_drag(&mut self, id: u32, payload: u64) {
|
||||
if self.mouse_clicked {
|
||||
self.dragging = Some((id, payload));
|
||||
self.drag_start = (self.mouse_x, self.mouse_y);
|
||||
self.drag_started = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if a drag operation is currently in progress (past the threshold).
|
||||
pub fn is_dragging(&self) -> bool {
|
||||
if let Some(_) = self.dragging {
|
||||
self.drag_started
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// End the current drag operation. Returns `Some((source_id, payload))` if a drag
|
||||
/// was in progress and the mouse was released, otherwise `None`.
|
||||
pub fn end_drag(&mut self) -> Option<(u32, u64)> {
|
||||
// Update drag started state based on threshold
|
||||
if let Some(_) = self.dragging {
|
||||
if !self.drag_started {
|
||||
let dx = self.mouse_x - self.drag_start.0;
|
||||
let dy = self.mouse_y - self.drag_start.1;
|
||||
if (dx * dx + dy * dy).sqrt() >= DRAG_THRESHOLD {
|
||||
self.drag_started = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.mouse_released {
|
||||
let result = if self.drag_started { self.dragging } else { None };
|
||||
self.dragging = None;
|
||||
self.drag_started = false;
|
||||
result
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Declare a drop target region. If a drag is released over this target,
|
||||
/// returns the payload that was dropped. Otherwise returns `None`.
|
||||
pub fn drop_target(&mut self, _id: u32, x: f32, y: f32, w: f32, h: f32) -> Option<u64> {
|
||||
if self.mouse_released && self.drag_started {
|
||||
if self.mouse_in_rect(x, y, w, h) {
|
||||
if let Some((_src_id, payload)) = self.dragging {
|
||||
return Some(payload);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::ui_context::UiContext;
|
||||
use crate::ui_context::{Key, UiContext};
|
||||
|
||||
#[test]
|
||||
fn test_button_returns_false_when_not_clicked() {
|
||||
@@ -278,4 +505,241 @@ mod tests {
|
||||
let v2 = ctx.slider("test", -10.0, 0.0, 100.0);
|
||||
assert!((v2 - 0.0).abs() < 1e-6, "slider should clamp to min: got {}", v2);
|
||||
}
|
||||
|
||||
// ── Text Input Tests ──────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn test_text_input_basic_typing() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::new();
|
||||
|
||||
// Click on the text input to focus it (at x=10, y=10, width=200)
|
||||
ctx.begin_frame(15.0, 15.0, true);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
|
||||
// Now type some characters
|
||||
ctx.begin_frame(15.0, 15.0, false);
|
||||
ctx.input_char('H');
|
||||
ctx.input_char('i');
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(changed);
|
||||
assert_eq!(buf, "Hi");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_input_backspace() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::from("abc");
|
||||
|
||||
// Focus — click far right so cursor goes to end (padding=4, gw=8, 3 chars → need x > 10+4+24=38)
|
||||
ctx.begin_frame(50.0, 15.0, true);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
|
||||
// Backspace
|
||||
ctx.begin_frame(50.0, 15.0, false);
|
||||
ctx.input_key(Key::Backspace);
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(changed);
|
||||
assert_eq!(buf, "ab");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_input_cursor_movement() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::from("abc");
|
||||
|
||||
// Focus — click far right so cursor goes to end
|
||||
ctx.begin_frame(50.0, 15.0, true);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert_eq!(ctx.cursor_pos, 3); // cursor at end of "abc"
|
||||
|
||||
// Move cursor to beginning with Home
|
||||
ctx.begin_frame(15.0, 15.0, false);
|
||||
ctx.input_key(Key::Home);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert_eq!(ctx.cursor_pos, 0);
|
||||
|
||||
// Type 'X' at beginning
|
||||
ctx.begin_frame(15.0, 15.0, false);
|
||||
ctx.input_char('X');
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(changed);
|
||||
assert_eq!(buf, "Xabc");
|
||||
assert_eq!(ctx.cursor_pos, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_input_delete_key() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::from("abc");
|
||||
|
||||
// Focus
|
||||
ctx.begin_frame(15.0, 15.0, true);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
|
||||
// Move to Home, then Delete
|
||||
ctx.begin_frame(15.0, 15.0, false);
|
||||
ctx.input_key(Key::Home);
|
||||
ctx.input_key(Key::Delete);
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(changed);
|
||||
assert_eq!(buf, "bc");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_input_arrow_keys() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::from("hello");
|
||||
|
||||
// Focus — click far right so cursor at end
|
||||
ctx.begin_frame(100.0, 15.0, true);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
|
||||
// Left twice from end (pos 5→3)
|
||||
ctx.begin_frame(100.0, 15.0, false);
|
||||
ctx.input_key(Key::Left);
|
||||
ctx.input_key(Key::Left);
|
||||
ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert_eq!(ctx.cursor_pos, 3);
|
||||
|
||||
// Type 'X' at position 3
|
||||
ctx.begin_frame(100.0, 15.0, false);
|
||||
ctx.input_char('X');
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(changed);
|
||||
assert_eq!(buf, "helXlo");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_input_no_change_when_not_focused() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
let mut buf = String::from("test");
|
||||
|
||||
// Don't click on the input — mouse at (500, 500) far away
|
||||
ctx.begin_frame(500.0, 500.0, true);
|
||||
ctx.input_char('X');
|
||||
let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
|
||||
assert!(!changed);
|
||||
assert_eq!(buf, "test");
|
||||
}
|
||||
|
||||
// ── Scroll Panel Tests ────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn test_scroll_offset_clamping() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
|
||||
// Panel at (0,0), 200x100, content_height=300
|
||||
// Scroll down a lot
|
||||
ctx.begin_frame(100.0, 50.0, false);
|
||||
ctx.set_scroll(-100.0); // scroll down
|
||||
ctx.begin_scroll_panel(1, 0.0, 0.0, 200.0, 100.0, 300.0);
|
||||
ctx.end_scroll_panel();
|
||||
|
||||
// max_scroll = 300 - 100 = 200; scroll should be clamped
|
||||
let scroll = ctx.scroll_offsets.get(&1).copied().unwrap_or(0.0);
|
||||
assert!(scroll >= 0.0 && scroll <= 200.0, "scroll={}", scroll);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scroll_offset_does_not_go_negative() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
|
||||
// Scroll up when already at top
|
||||
ctx.begin_frame(100.0, 50.0, false);
|
||||
ctx.set_scroll(100.0); // scroll up
|
||||
ctx.begin_scroll_panel(1, 0.0, 0.0, 200.0, 100.0, 300.0);
|
||||
ctx.end_scroll_panel();
|
||||
|
||||
let scroll = ctx.scroll_offsets.get(&1).copied().unwrap_or(0.0);
|
||||
assert!((scroll - 0.0).abs() < 1e-6, "scroll should be 0, got {}", scroll);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scroll_panel_content_clipping() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
|
||||
ctx.begin_frame(100.0, 50.0, false);
|
||||
ctx.begin_scroll_panel(1, 10.0, 20.0, 200.0, 100.0, 300.0);
|
||||
|
||||
// Draw some content inside
|
||||
ctx.text("Inside scroll");
|
||||
// Commands drawn inside should have a scissor rect
|
||||
let has_scissor = ctx.draw_list.commands.iter().any(|c| c.scissor.is_some());
|
||||
assert!(has_scissor, "commands inside scroll panel should have scissor rects");
|
||||
|
||||
ctx.end_scroll_panel();
|
||||
|
||||
// Commands drawn after end_scroll_panel should NOT have scissor
|
||||
let cmds_before = ctx.draw_list.commands.len();
|
||||
ctx.text("Outside scroll");
|
||||
let new_cmds = &ctx.draw_list.commands[cmds_before..];
|
||||
let has_scissor_after = new_cmds.iter().any(|c| c.scissor.is_some());
|
||||
assert!(!has_scissor_after, "commands after end_scroll_panel should not have scissor");
|
||||
}
|
||||
|
||||
// ── Drag and Drop Tests ──────────────────────────────────────────
|
||||
|
||||
    #[test]
    fn test_drag_start_and_end() {
        let mut ctx = UiContext::new(800.0, 600.0);

        // Frame 1: mouse down — begin drag (cursor has not moved yet, so the
        // drag is still below the 5px threshold).
        ctx.begin_frame(100.0, 100.0, true);
        ctx.begin_drag(1, 42);
        assert!(!ctx.is_dragging(), "should not be dragging yet (below threshold)");
        let _ = ctx.end_drag();

        // Frame 2: mouse moved past threshold (10px > DRAG_THRESHOLD), still down.
        ctx.begin_frame(110.0, 100.0, true);
        let _ = ctx.end_drag();
        assert!(ctx.is_dragging(), "should be dragging after moving past threshold");

        // Frame 3: mouse released — end_drag must hand back the source id and
        // payload given to begin_drag.
        ctx.begin_frame(120.0, 100.0, false);
        let result = ctx.end_drag();
        assert!(result.is_some());
        let (src_id, payload) = result.unwrap();
        assert_eq!(src_id, 1);
        assert_eq!(payload, 42);
    }
|
||||
|
||||
#[test]
|
||||
fn test_drop_on_target() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
|
||||
// Frame 1: begin drag
|
||||
ctx.begin_frame(100.0, 100.0, true);
|
||||
ctx.begin_drag(1, 99);
|
||||
let _ = ctx.end_drag();
|
||||
|
||||
// Frame 2: move past threshold
|
||||
ctx.begin_frame(110.0, 100.0, true);
|
||||
let _ = ctx.end_drag();
|
||||
|
||||
// Frame 3: release over drop target at (200, 200, 50, 50)
|
||||
ctx.begin_frame(220.0, 220.0, false);
|
||||
let drop_result = ctx.drop_target(2, 200.0, 200.0, 50.0, 50.0);
|
||||
assert_eq!(drop_result, Some(99));
|
||||
let _ = ctx.end_drag();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drop_outside_target() {
|
||||
let mut ctx = UiContext::new(800.0, 600.0);
|
||||
|
||||
// Frame 1: begin drag
|
||||
ctx.begin_frame(100.0, 100.0, true);
|
||||
ctx.begin_drag(1, 77);
|
||||
let _ = ctx.end_drag();
|
||||
|
||||
// Frame 2: move past threshold
|
||||
ctx.begin_frame(110.0, 100.0, true);
|
||||
let _ = ctx.end_drag();
|
||||
|
||||
// Frame 3: release far from drop target
|
||||
ctx.begin_frame(500.0, 500.0, false);
|
||||
let drop_result = ctx.drop_target(2, 200.0, 200.0, 50.0, 50.0);
|
||||
assert_eq!(drop_result, None);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -174,6 +174,64 @@ impl Mat4 {
|
||||
)
|
||||
}
|
||||
|
||||
    /// Compute the inverse of this matrix. Returns `None` if the matrix is singular.
    ///
    /// Uses the standard 4x4 inverse via 2x2 sub-determinants (the s/c
    /// "Laplace expansion" scheme): six minors from the top two rows (s0..s5),
    /// six from the bottom two rows (c0..c5), then the adjugate scaled by
    /// 1/det. Singularity is detected with a fixed 1e-12 threshold on |det|.
    pub fn inverse(&self) -> Option<Self> {
        let m = &self.cols;
        // Flatten to row-major for cofactor expansion
        // m[col][row] — so element (row, col) = m[col][row]
        let e = |r: usize, c: usize| -> f32 { m[c][r] };

        // Compute cofactors using 2x2 determinants
        // s_k: 2x2 minors built from rows 0-1.
        let s0 = e(0,0) * e(1,1) - e(1,0) * e(0,1);
        let s1 = e(0,0) * e(1,2) - e(1,0) * e(0,2);
        let s2 = e(0,0) * e(1,3) - e(1,0) * e(0,3);
        let s3 = e(0,1) * e(1,2) - e(1,1) * e(0,2);
        let s4 = e(0,1) * e(1,3) - e(1,1) * e(0,3);
        let s5 = e(0,2) * e(1,3) - e(1,2) * e(0,3);

        // c_k: 2x2 minors built from rows 2-3.
        let c5 = e(2,2) * e(3,3) - e(3,2) * e(2,3);
        let c4 = e(2,1) * e(3,3) - e(3,1) * e(2,3);
        let c3 = e(2,1) * e(3,2) - e(3,1) * e(2,2);
        let c2 = e(2,0) * e(3,3) - e(3,0) * e(2,3);
        let c1 = e(2,0) * e(3,2) - e(3,0) * e(2,2);
        let c0 = e(2,0) * e(3,1) - e(3,0) * e(2,1);

        // det(M) expressed through the paired minors.
        let det = s0 * c5 - s1 * c4 + s2 * c3 + s3 * c2 - s4 * c1 + s5 * c0;
        // Absolute (not scale-relative) epsilon; adequate for typical
        // transform matrices with elements of order 1.
        if det.abs() < 1e-12 {
            return None;
        }
        let inv_det = 1.0 / det;

        // Adjugate matrix (transposed cofactor matrix), stored column-major
        let inv = Self::from_cols(
            [
                ( e(1,1) * c5 - e(1,2) * c4 + e(1,3) * c3) * inv_det,
                (-e(0,1) * c5 + e(0,2) * c4 - e(0,3) * c3) * inv_det,
                ( e(3,1) * s5 - e(3,2) * s4 + e(3,3) * s3) * inv_det,
                (-e(2,1) * s5 + e(2,2) * s4 - e(2,3) * s3) * inv_det,
            ],
            [
                (-e(1,0) * c5 + e(1,2) * c2 - e(1,3) * c1) * inv_det,
                ( e(0,0) * c5 - e(0,2) * c2 + e(0,3) * c1) * inv_det,
                (-e(3,0) * s5 + e(3,2) * s2 - e(3,3) * s1) * inv_det,
                ( e(2,0) * s5 - e(2,2) * s2 + e(2,3) * s1) * inv_det,
            ],
            [
                ( e(1,0) * c4 - e(1,1) * c2 + e(1,3) * c0) * inv_det,
                (-e(0,0) * c4 + e(0,1) * c2 - e(0,3) * c0) * inv_det,
                ( e(3,0) * s4 - e(3,1) * s2 + e(3,3) * s0) * inv_det,
                (-e(2,0) * s4 + e(2,1) * s2 - e(2,3) * s0) * inv_det,
            ],
            [
                (-e(1,0) * c3 + e(1,1) * c1 - e(1,2) * c0) * inv_det,
                ( e(0,0) * c3 - e(0,1) * c1 + e(0,2) * c0) * inv_det,
                (-e(3,0) * s3 + e(3,1) * s1 - e(3,2) * s0) * inv_det,
                ( e(2,0) * s3 - e(2,1) * s1 + e(2,2) * s0) * inv_det,
            ],
        );
        Some(inv)
    }
|
||||
|
||||
/// Return the transpose of this matrix.
|
||||
pub fn transpose(&self) -> Self {
|
||||
let c = &self.cols;
|
||||
|
||||
234
crates/voltex_net/src/interpolation.rs
Normal file
234
crates/voltex_net/src/interpolation.rs
Normal file
@@ -0,0 +1,234 @@
|
||||
use std::collections::VecDeque;
|
||||
|
||||
use crate::snapshot::{EntityState, Snapshot};
|
||||
|
||||
/// Buffers recent snapshots and interpolates between them for smooth rendering.
|
||||
pub struct InterpolationBuffer {
|
||||
snapshots: VecDeque<(f64, Snapshot)>,
|
||||
/// Render delay behind the latest server time (seconds).
|
||||
interp_delay: f64,
|
||||
/// Maximum number of snapshots to keep in the buffer.
|
||||
max_snapshots: usize,
|
||||
}
|
||||
|
||||
impl InterpolationBuffer {
|
||||
/// Create a new interpolation buffer with the given delay in seconds.
|
||||
pub fn new(interp_delay: f64) -> Self {
|
||||
InterpolationBuffer {
|
||||
snapshots: VecDeque::new(),
|
||||
interp_delay,
|
||||
max_snapshots: 32,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Push a new snapshot with its server timestamp.
    ///
    /// NOTE(review): `interpolate` assumes snapshots are pushed with
    /// nondecreasing `server_time`; nothing here enforces that — confirm
    /// callers deliver snapshots in order.
    pub fn push(&mut self, server_time: f64, snapshot: Snapshot) {
        self.snapshots.push_back((server_time, snapshot));
        // Evict old snapshots beyond the buffer limit
        while self.snapshots.len() > self.max_snapshots {
            self.snapshots.pop_front();
        }
    }
|
||||
|
||||
    /// Interpolate to produce a snapshot for the given render_time.
    ///
    /// The render_time should be `current_server_time - interp_delay`.
    /// Returns None if there are fewer than 2 snapshots or render_time
    /// is before all buffered snapshots.
    pub fn interpolate(&self, render_time: f64) -> Option<Snapshot> {
        if self.snapshots.len() < 2 {
            return None;
        }

        // Find two bracketing snapshots: the last one <= render_time and the first one > render_time
        // (linear scan over at most max_snapshots entries; assumes timestamps
        // are nondecreasing — see note on `push`).
        let mut before = None;
        let mut after = None;

        for (i, (time, _)) in self.snapshots.iter().enumerate() {
            if *time <= render_time {
                before = Some(i);
            } else {
                after = Some(i);
                break;
            }
        }

        match (before, after) {
            (Some(b), Some(a)) => {
                let (t0, snap0) = &self.snapshots[b];
                let (t1, snap1) = &self.snapshots[a];
                let dt = t1 - t0;
                // Duplicate (or out-of-order) timestamps: fall back to the
                // earlier snapshot rather than dividing by zero.
                if dt <= 0.0 {
                    return Some(snap0.clone());
                }
                // Blend factor in [0, 1] between the bracketing snapshots.
                let alpha = ((render_time - t0) / dt).clamp(0.0, 1.0) as f32;
                Some(lerp_snapshots(snap0, snap1, alpha))
            }
            (Some(b), None) => {
                // render_time is beyond all snapshots — return the latest
                Some(self.snapshots[b].1.clone())
            }
            // render_time precedes every buffered snapshot.
            _ => None,
        }
    }
|
||||
|
||||
    /// Get the interpolation delay.
    ///
    /// This is the fixed render-behind-server delay (seconds) passed to `new`.
    pub fn delay(&self) -> f64 {
        self.interp_delay
    }
|
||||
}
|
||||
|
||||
/// Linear interpolation: yields `a` at t = 0 and `b` at t = 1.
fn lerp(a: f32, b: f32, t: f32) -> f32 {
    let delta = b - a;
    a + delta * t
}
|
||||
|
||||
/// Component-wise linear interpolation of two 3-vectors.
fn lerp_f32x3(a: &[f32; 3], b: &[f32; 3], t: f32) -> [f32; 3] {
    let mut out = [0.0f32; 3];
    for i in 0..3 {
        out[i] = a[i] + (b[i] - a[i]) * t;
    }
    out
}
|
||||
|
||||
fn lerp_entity(a: &EntityState, b: &EntityState, t: f32) -> EntityState {
|
||||
EntityState {
|
||||
id: a.id,
|
||||
position: lerp_f32x3(&a.position, &b.position, t),
|
||||
rotation: lerp_f32x3(&a.rotation, &b.rotation, t),
|
||||
velocity: lerp_f32x3(&a.velocity, &b.velocity, t),
|
||||
}
|
||||
}
|
||||
|
||||
/// Linearly interpolate between two snapshots.
|
||||
/// Entities are matched by id. Entities only in one snapshot are included as-is.
|
||||
fn lerp_snapshots(a: &Snapshot, b: &Snapshot, t: f32) -> Snapshot {
|
||||
use std::collections::HashMap;
|
||||
|
||||
let a_map: HashMap<u32, &EntityState> = a.entities.iter().map(|e| (e.id, e)).collect();
|
||||
let b_map: HashMap<u32, &EntityState> = b.entities.iter().map(|e| (e.id, e)).collect();
|
||||
|
||||
let mut entities = Vec::new();
|
||||
|
||||
// Interpolate matched entities, include a-only entities
|
||||
for ea in &a.entities {
|
||||
if let Some(eb) = b_map.get(&ea.id) {
|
||||
entities.push(lerp_entity(ea, eb, t));
|
||||
} else {
|
||||
entities.push(ea.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Include b-only entities
|
||||
for eb in &b.entities {
|
||||
if !a_map.contains_key(&eb.id) {
|
||||
entities.push(eb.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Interpolate tick
|
||||
let tick = (a.tick as f64 + (b.tick as f64 - a.tick as f64) * t as f64) as u32;
|
||||
|
||||
Snapshot { tick, entities }
|
||||
}
|
||||
|
||||
// Unit tests for InterpolationBuffer: bracketing, blending, and edge cases.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::snapshot::EntityState;

    // Helper: a one-entity snapshot whose entity sits at `x` on the X axis.
    fn make_snapshot(tick: u32, x: f32) -> Snapshot {
        Snapshot {
            tick,
            entities: vec![EntityState {
                id: 1,
                position: [x, 0.0, 0.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        }
    }

    #[test]
    fn test_exact_match_at_snapshot_time() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(0.0, make_snapshot(0, 0.0));
        buf.push(0.1, make_snapshot(1, 10.0));

        // render_time equal to the first snapshot time reproduces it exactly.
        let result = buf.interpolate(0.0).expect("should interpolate");
        assert_eq!(result.entities[0].position[0], 0.0);
    }

    #[test]
    fn test_midpoint_interpolation() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(0.0, make_snapshot(0, 0.0));
        buf.push(1.0, make_snapshot(10, 10.0));

        let result = buf.interpolate(0.5).expect("should interpolate");
        let x = result.entities[0].position[0];
        assert!(
            (x - 5.0).abs() < 0.001,
            "Expected ~5.0 at midpoint, got {}",
            x
        );
    }

    #[test]
    fn test_interpolation_at_quarter() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(0.0, make_snapshot(0, 0.0));
        buf.push(1.0, make_snapshot(10, 100.0));

        let result = buf.interpolate(0.25).unwrap();
        let x = result.entities[0].position[0];
        assert!(
            (x - 25.0).abs() < 0.01,
            "Expected ~25.0 at 0.25, got {}",
            x
        );
    }

    #[test]
    fn test_extrapolation_returns_latest() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(0.0, make_snapshot(0, 0.0));
        buf.push(1.0, make_snapshot(10, 10.0));

        // render_time beyond all snapshots
        let result = buf.interpolate(2.0).expect("should return latest");
        assert_eq!(result.entities[0].position[0], 10.0);
    }

    #[test]
    fn test_too_few_snapshots_returns_none() {
        let mut buf = InterpolationBuffer::new(0.1);
        assert!(buf.interpolate(0.0).is_none());

        // One snapshot is still not enough to form a bracket.
        buf.push(0.0, make_snapshot(0, 0.0));
        assert!(buf.interpolate(0.0).is_none());
    }

    #[test]
    fn test_render_time_before_all_snapshots() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(1.0, make_snapshot(10, 10.0));
        buf.push(2.0, make_snapshot(20, 20.0));

        // render_time before the first snapshot
        let result = buf.interpolate(0.0);
        assert!(result.is_none());
    }

    #[test]
    fn test_multiple_snapshots_picks_correct_bracket() {
        let mut buf = InterpolationBuffer::new(0.1);
        buf.push(0.0, make_snapshot(0, 0.0));
        buf.push(1.0, make_snapshot(1, 10.0));
        buf.push(2.0, make_snapshot(2, 20.0));

        // Should interpolate between snapshot at t=1 and t=2
        let result = buf.interpolate(1.5).unwrap();
        let x = result.entities[0].position[0];
        assert!(
            (x - 15.0).abs() < 0.01,
            "Expected ~15.0, got {}",
            x
        );
    }
}
|
||||
@@ -2,8 +2,14 @@ pub mod packet;
|
||||
pub mod socket;
|
||||
pub mod server;
|
||||
pub mod client;
|
||||
pub mod reliable;
|
||||
pub mod snapshot;
|
||||
pub mod interpolation;
|
||||
|
||||
pub use packet::Packet;
|
||||
pub use socket::NetSocket;
|
||||
pub use server::{NetServer, ServerEvent, ClientInfo};
|
||||
pub use client::{NetClient, ClientEvent};
|
||||
pub use reliable::{ReliableChannel, OrderedChannel};
|
||||
pub use snapshot::{Snapshot, EntityState, serialize_snapshot, deserialize_snapshot, diff_snapshots, apply_diff};
|
||||
pub use interpolation::InterpolationBuffer;
|
||||
|
||||
@@ -5,6 +5,10 @@ const TYPE_DISCONNECT: u8 = 3;
|
||||
const TYPE_PING: u8 = 4;
|
||||
const TYPE_PONG: u8 = 5;
|
||||
const TYPE_USER_DATA: u8 = 6;
|
||||
const TYPE_RELIABLE: u8 = 7;
|
||||
const TYPE_ACK: u8 = 8;
|
||||
const TYPE_SNAPSHOT: u8 = 9;
|
||||
const TYPE_SNAPSHOT_DELTA: u8 = 10;
|
||||
|
||||
/// Header size: type_id(1) + payload_len(2 LE) + reserved(1) = 4 bytes
|
||||
const HEADER_SIZE: usize = 4;
|
||||
@@ -18,6 +22,10 @@ pub enum Packet {
|
||||
Ping { timestamp: u64 },
|
||||
Pong { timestamp: u64 },
|
||||
UserData { client_id: u32, data: Vec<u8> },
|
||||
Reliable { sequence: u16, data: Vec<u8> },
|
||||
Ack { sequence: u16 },
|
||||
Snapshot { tick: u32, data: Vec<u8> },
|
||||
SnapshotDelta { base_tick: u32, tick: u32, data: Vec<u8> },
|
||||
}
|
||||
|
||||
impl Packet {
|
||||
@@ -112,6 +120,38 @@ impl Packet {
|
||||
let data = payload[4..].to_vec();
|
||||
Ok(Packet::UserData { client_id, data })
|
||||
}
|
||||
TYPE_RELIABLE => {
|
||||
if payload.len() < 2 {
|
||||
return Err("Reliable payload too short".to_string());
|
||||
}
|
||||
let sequence = u16::from_le_bytes([payload[0], payload[1]]);
|
||||
let data = payload[2..].to_vec();
|
||||
Ok(Packet::Reliable { sequence, data })
|
||||
}
|
||||
TYPE_ACK => {
|
||||
if payload.len() < 2 {
|
||||
return Err("Ack payload too short".to_string());
|
||||
}
|
||||
let sequence = u16::from_le_bytes([payload[0], payload[1]]);
|
||||
Ok(Packet::Ack { sequence })
|
||||
}
|
||||
TYPE_SNAPSHOT => {
|
||||
if payload.len() < 4 {
|
||||
return Err("Snapshot payload too short".to_string());
|
||||
}
|
||||
let tick = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
|
||||
let data = payload[4..].to_vec();
|
||||
Ok(Packet::Snapshot { tick, data })
|
||||
}
|
||||
TYPE_SNAPSHOT_DELTA => {
|
||||
if payload.len() < 8 {
|
||||
return Err("SnapshotDelta payload too short".to_string());
|
||||
}
|
||||
let base_tick = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
|
||||
let tick = u32::from_le_bytes([payload[4], payload[5], payload[6], payload[7]]);
|
||||
let data = payload[8..].to_vec();
|
||||
Ok(Packet::SnapshotDelta { base_tick, tick, data })
|
||||
}
|
||||
_ => Err(format!("Unknown packet type_id: {}", type_id)),
|
||||
}
|
||||
}
|
||||
@@ -124,6 +164,10 @@ impl Packet {
|
||||
Packet::Ping { .. } => TYPE_PING,
|
||||
Packet::Pong { .. } => TYPE_PONG,
|
||||
Packet::UserData { .. } => TYPE_USER_DATA,
|
||||
Packet::Reliable { .. } => TYPE_RELIABLE,
|
||||
Packet::Ack { .. } => TYPE_ACK,
|
||||
Packet::Snapshot { .. } => TYPE_SNAPSHOT,
|
||||
Packet::SnapshotDelta { .. } => TYPE_SNAPSHOT_DELTA,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -147,6 +191,26 @@ impl Packet {
|
||||
buf.extend_from_slice(data);
|
||||
buf
|
||||
}
|
||||
Packet::Reliable { sequence, data } => {
|
||||
let mut buf = Vec::with_capacity(2 + data.len());
|
||||
buf.extend_from_slice(&sequence.to_le_bytes());
|
||||
buf.extend_from_slice(data);
|
||||
buf
|
||||
}
|
||||
Packet::Ack { sequence } => sequence.to_le_bytes().to_vec(),
|
||||
Packet::Snapshot { tick, data } => {
|
||||
let mut buf = Vec::with_capacity(4 + data.len());
|
||||
buf.extend_from_slice(&tick.to_le_bytes());
|
||||
buf.extend_from_slice(data);
|
||||
buf
|
||||
}
|
||||
Packet::SnapshotDelta { base_tick, tick, data } => {
|
||||
let mut buf = Vec::with_capacity(8 + data.len());
|
||||
buf.extend_from_slice(&base_tick.to_le_bytes());
|
||||
buf.extend_from_slice(&tick.to_le_bytes());
|
||||
buf.extend_from_slice(data);
|
||||
buf
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -200,6 +264,36 @@ mod tests {
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reliable_roundtrip() {
|
||||
roundtrip(Packet::Reliable {
|
||||
sequence: 42,
|
||||
data: vec![0xCA, 0xFE],
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ack_roundtrip() {
|
||||
roundtrip(Packet::Ack { sequence: 100 });
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_snapshot_roundtrip() {
|
||||
roundtrip(Packet::Snapshot {
|
||||
tick: 999,
|
||||
data: vec![1, 2, 3],
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_snapshot_delta_roundtrip() {
|
||||
roundtrip(Packet::SnapshotDelta {
|
||||
base_tick: 10,
|
||||
tick: 15,
|
||||
data: vec![4, 5, 6],
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_type_returns_error() {
|
||||
// Build a packet with type_id = 99 (unknown)
|
||||
|
||||
318
crates/voltex_net/src/reliable.rs
Normal file
318
crates/voltex_net/src/reliable.rs
Normal file
@@ -0,0 +1,318 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// A channel that provides reliable delivery over unreliable transport.
///
/// Assigns sequence numbers, tracks ACKs, estimates RTT,
/// and retransmits unacknowledged packets after 2x RTT.
pub struct ReliableChannel {
    /// Sequence number stamped on the next outgoing packet (wraps at u16::MAX).
    next_sequence: u16,
    /// Unacknowledged sends: sequence -> (time last sent, wrapped bytes).
    pending_acks: HashMap<u16, (Instant, Vec<u8>)>,
    /// Every sequence seen from the peer, for duplicate suppression.
    /// NOTE(review): this set is never pruned, so after a full u16
    /// wrap-around fresh sequences collide with old ones — confirm the
    /// intended channel lifetime covers fewer than 65536 packets.
    received_seqs: HashSet<u16>,
    /// Smoothed round-trip-time estimate.
    rtt: Duration,
    /// Outgoing ACK packets that need to be sent by the caller.
    outgoing_acks: Vec<u16>,
}

impl ReliableChannel {
    /// Build an idle channel with a 100 ms starting RTT estimate.
    pub fn new() -> Self {
        ReliableChannel {
            next_sequence: 0,
            pending_acks: HashMap::new(),
            received_seqs: HashSet::new(),
            rtt: Duration::from_millis(100), // initial estimate until first ACK
            outgoing_acks: Vec::new(),
        }
    }

    /// Returns the current RTT estimate.
    pub fn rtt(&self) -> Duration {
        self.rtt
    }

    /// Returns the number of packets awaiting acknowledgement.
    pub fn pending_count(&self) -> usize {
        self.pending_acks.len()
    }

    /// Prepare a reliable send. Returns (sequence_number, wrapped_data).
    /// The caller is responsible for actually transmitting the wrapped data.
    pub fn send_reliable(&mut self, data: &[u8]) -> (u16, Vec<u8>) {
        let seq = self.next_sequence;
        self.next_sequence = seq.wrapping_add(1);

        // Wire layout: [seq(2 LE)][payload bytes].
        let mut wrapped = seq.to_le_bytes().to_vec();
        wrapped.extend_from_slice(data);

        // Keep the wrapped bytes so update() can retransmit them verbatim.
        self.pending_acks.insert(seq, (Instant::now(), wrapped.clone()));

        (seq, wrapped)
    }

    /// Process a received reliable packet. Returns the payload data if this is
    /// not a duplicate, or None if already received. Queues an ACK to send.
    pub fn receive_and_ack(&mut self, sequence: u16, data: &[u8]) -> Option<Vec<u8>> {
        // ACK unconditionally — a duplicate usually means our previous ACK was lost.
        self.outgoing_acks.push(sequence);

        // HashSet::insert returns false when the sequence was already present.
        if self.received_seqs.insert(sequence) {
            Some(data.to_vec())
        } else {
            None // duplicate
        }
    }

    /// Process an incoming ACK for a sequence we sent.
    pub fn process_ack(&mut self, sequence: u16) {
        if let Some((send_time, _)) = self.pending_acks.remove(&sequence) {
            // Fold the fresh sample into an exponential moving average
            // (gain 1/8): rtt = 0.875 * rtt + 0.125 * sample.
            let sample = send_time.elapsed().as_secs_f64();
            let smoothed = 0.875 * self.rtt.as_secs_f64() + 0.125 * sample;
            self.rtt = Duration::from_secs_f64(smoothed);
        }
    }

    /// Drain any pending outgoing ACK sequence numbers.
    pub fn drain_acks(&mut self) -> Vec<u16> {
        std::mem::replace(&mut self.outgoing_acks, Vec::new())
    }

    /// Check for timed-out packets and return their data for retransmission.
    /// Resets the send_time for retransmitted packets.
    pub fn update(&mut self) -> Vec<Vec<u8>> {
        let timeout = self.rtt * 2;
        let now = Instant::now();
        let mut resend = Vec::new();

        for (sent_at, bytes) in self.pending_acks.values_mut() {
            if now.duration_since(*sent_at) >= timeout {
                resend.push(bytes.clone());
                *sent_at = now; // restart the retransmission clock
            }
        }

        resend
    }
}
|
||||
|
||||
/// A channel that delivers packets in order, built on top of ReliableChannel.
|
||||
pub struct OrderedChannel {
|
||||
reliable: ReliableChannel,
|
||||
next_deliver: u16,
|
||||
buffer: HashMap<u16, Vec<u8>>,
|
||||
}
|
||||
|
||||
impl OrderedChannel {
|
||||
pub fn new() -> Self {
|
||||
OrderedChannel {
|
||||
reliable: ReliableChannel::new(),
|
||||
next_deliver: 0,
|
||||
buffer: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Access the underlying reliable channel (e.g., for send_reliable, process_ack, update).
|
||||
pub fn reliable(&self) -> &ReliableChannel {
|
||||
&self.reliable
|
||||
}
|
||||
|
||||
/// Access the underlying reliable channel mutably.
|
||||
pub fn reliable_mut(&mut self) -> &mut ReliableChannel {
|
||||
&mut self.reliable
|
||||
}
|
||||
|
||||
/// Prepare a reliable, ordered send.
|
||||
pub fn send(&mut self, data: &[u8]) -> (u16, Vec<u8>) {
|
||||
self.reliable.send_reliable(data)
|
||||
}
|
||||
|
||||
/// Receive a packet. Buffers out-of-order packets and returns all
|
||||
/// packets that can now be delivered in sequence order.
|
||||
pub fn receive(&mut self, sequence: u16, data: &[u8]) -> Vec<Vec<u8>> {
|
||||
let payload = self.reliable.receive_and_ack(sequence, data);
|
||||
|
||||
if let Some(payload) = payload {
|
||||
self.buffer.insert(sequence, payload);
|
||||
}
|
||||
|
||||
// Deliver as many consecutive packets as possible
|
||||
let mut delivered = Vec::new();
|
||||
while let Some(data) = self.buffer.remove(&self.next_deliver) {
|
||||
delivered.push(data);
|
||||
self.next_deliver = self.next_deliver.wrapping_add(1);
|
||||
}
|
||||
|
||||
delivered
|
||||
}
|
||||
}
|
||||
|
||||
// Unit tests for ReliableChannel (ack/dedup/RTT/retransmit) and
// OrderedChannel (in-order delivery with gap buffering).
#[cfg(test)]
mod tests {
    use super::*;

    // ---- ReliableChannel tests ----

    #[test]
    fn test_send_receive_ack_roundtrip() {
        let mut sender = ReliableChannel::new();
        let mut receiver = ReliableChannel::new();

        let original = b"hello world";
        let (seq, _buf) = sender.send_reliable(original);
        assert_eq!(seq, 0);
        assert_eq!(sender.pending_count(), 1);

        // Receiver gets the packet
        let result = receiver.receive_and_ack(seq, original);
        assert_eq!(result, Some(original.to_vec()));

        // Receiver queued an ack
        let acks = receiver.drain_acks();
        assert_eq!(acks, vec![0]);

        // Sender processes the ack
        sender.process_ack(seq);
        assert_eq!(sender.pending_count(), 0);
    }

    #[test]
    fn test_duplicate_rejection() {
        let mut receiver = ReliableChannel::new();

        let data = b"payload";
        let result1 = receiver.receive_and_ack(0, data);
        assert!(result1.is_some());

        let result2 = receiver.receive_and_ack(0, data);
        assert!(result2.is_none(), "Duplicate should be rejected");

        // But ACK is still queued for both
        let acks = receiver.drain_acks();
        assert_eq!(acks.len(), 2);
    }

    #[test]
    fn test_sequence_numbers_increment() {
        let mut channel = ReliableChannel::new();

        let (s0, _) = channel.send_reliable(b"a");
        let (s1, _) = channel.send_reliable(b"b");
        let (s2, _) = channel.send_reliable(b"c");

        assert_eq!(s0, 0);
        assert_eq!(s1, 1);
        assert_eq!(s2, 2);
        assert_eq!(channel.pending_count(), 3);
    }

    #[test]
    fn test_retransmission_on_timeout() {
        let mut channel = ReliableChannel::new();
        // Set a very short RTT so timeout (2*RTT) triggers quickly
        channel.rtt = Duration::from_millis(1);

        let (_seq, _buf) = channel.send_reliable(b"data");
        assert_eq!(channel.pending_count(), 1);

        // Wait for timeout
        std::thread::sleep(Duration::from_millis(10));

        let retransmits = channel.update();
        assert_eq!(retransmits.len(), 1, "Should retransmit 1 packet");
        // Packet is still pending (not acked)
        assert_eq!(channel.pending_count(), 1);
    }

    #[test]
    fn test_no_retransmission_before_timeout() {
        let mut channel = ReliableChannel::new();
        // Default RTT = 100ms, so timeout = 200ms
        let (_seq, _buf) = channel.send_reliable(b"data");

        // Immediately check — should not retransmit
        let retransmits = channel.update();
        assert!(retransmits.is_empty());
    }

    #[test]
    fn test_rtt_estimation() {
        let mut channel = ReliableChannel::new();
        let initial_rtt = channel.rtt();

        let (seq, _) = channel.send_reliable(b"x");
        std::thread::sleep(Duration::from_millis(5));
        channel.process_ack(seq);

        // RTT should have changed from initial value
        let new_rtt = channel.rtt();
        assert_ne!(initial_rtt, new_rtt, "RTT should be updated after ACK");
    }

    #[test]
    fn test_wrapping_sequence() {
        let mut channel = ReliableChannel::new();
        channel.next_sequence = u16::MAX;

        let (s1, _) = channel.send_reliable(b"a");
        assert_eq!(s1, u16::MAX);

        let (s2, _) = channel.send_reliable(b"b");
        assert_eq!(s2, 0); // wrapped
    }

    // ---- OrderedChannel tests ----

    #[test]
    fn test_ordered_in_order_delivery() {
        let mut channel = OrderedChannel::new();

        let delivered0 = channel.receive(0, b"first");
        assert_eq!(delivered0, vec![b"first".to_vec()]);

        let delivered1 = channel.receive(1, b"second");
        assert_eq!(delivered1, vec![b"second".to_vec()]);
    }

    #[test]
    fn test_ordered_out_of_order_delivery() {
        let mut channel = OrderedChannel::new();

        // Receive seq 1 first (out of order)
        let delivered = channel.receive(1, b"second");
        assert!(delivered.is_empty(), "Seq 1 should be buffered, waiting for 0");

        // Receive seq 2 (still missing 0)
        let delivered = channel.receive(2, b"third");
        assert!(delivered.is_empty());

        // Receive seq 0 — should deliver 0, 1, 2 in order
        let delivered = channel.receive(0, b"first");
        assert_eq!(delivered.len(), 3);
        assert_eq!(delivered[0], b"first");
        assert_eq!(delivered[1], b"second");
        assert_eq!(delivered[2], b"third");
    }

    #[test]
    fn test_ordered_gap_handling() {
        let mut channel = OrderedChannel::new();

        // Deliver 0
        let d = channel.receive(0, b"a");
        assert_eq!(d.len(), 1);

        // Skip 1, deliver 2
        let d = channel.receive(2, b"c");
        assert!(d.is_empty(), "Can't deliver 2 without 1");

        // Now deliver 1 — should flush both 1 and 2
        let d = channel.receive(1, b"b");
        assert_eq!(d.len(), 2);
        assert_eq!(d[0], b"b");
        assert_eq!(d[1], b"c");
    }
}
|
||||
378
crates/voltex_net/src/snapshot.rs
Normal file
378
crates/voltex_net/src/snapshot.rs
Normal file
@@ -0,0 +1,378 @@
|
||||
/// State of a single entity at a point in time.
#[derive(Debug, Clone, PartialEq)]
pub struct EntityState {
    /// Stable identifier used to match this entity across snapshots.
    pub id: u32,
    /// World-space position (x, y, z).
    pub position: [f32; 3],
    /// Orientation; presumably Euler angles — TODO confirm units/convention
    /// with the consumers of this type.
    pub rotation: [f32; 3],
    /// Linear velocity (x, y, z).
    pub velocity: [f32; 3],
}
|
||||
|
||||
/// A snapshot of the world at a given tick.
#[derive(Debug, Clone, PartialEq)]
pub struct Snapshot {
    /// Simulation tick at which this snapshot was captured.
    pub tick: u32,
    /// All replicated entities at that tick.
    pub entities: Vec<EntityState>,
}
|
||||
|
||||
/// Binary size of one entity: id(4) + pos(12) + rot(12) + vel(12) = 40 bytes
/// Must stay in sync with `serialize_entity` / `deserialize_entity`.
const ENTITY_SIZE: usize = 4 + 12 + 12 + 12;
|
||||
|
||||
/// Append one f32 to `buf` in little-endian byte order.
fn write_f32_le(buf: &mut Vec<u8>, v: f32) {
    for byte in v.to_le_bytes() {
        buf.push(byte);
    }
}
|
||||
|
||||
/// Decode the little-endian f32 stored at `data[offset..offset + 4]`.
/// Panics on out-of-bounds access; callers validate lengths beforehand.
fn read_f32_le(data: &[u8], offset: usize) -> f32 {
    let bytes = [
        data[offset],
        data[offset + 1],
        data[offset + 2],
        data[offset + 3],
    ];
    f32::from_le_bytes(bytes)
}
|
||||
|
||||
fn write_f32x3(buf: &mut Vec<u8>, v: &[f32; 3]) {
|
||||
write_f32_le(buf, v[0]);
|
||||
write_f32_le(buf, v[1]);
|
||||
write_f32_le(buf, v[2]);
|
||||
}
|
||||
|
||||
fn read_f32x3(data: &[u8], offset: usize) -> [f32; 3] {
|
||||
[
|
||||
read_f32_le(data, offset),
|
||||
read_f32_le(data, offset + 4),
|
||||
read_f32_le(data, offset + 8),
|
||||
]
|
||||
}
|
||||
|
||||
/// Append one entity's wire form: id(4 LE) + position + rotation + velocity.
fn serialize_entity(buf: &mut Vec<u8>, e: &EntityState) {
    buf.extend_from_slice(&e.id.to_le_bytes());
    for field in [&e.position, &e.rotation, &e.velocity] {
        write_f32x3(buf, field);
    }
}
|
||||
|
||||
fn deserialize_entity(data: &[u8], offset: usize) -> EntityState {
|
||||
let id = u32::from_le_bytes([
|
||||
data[offset], data[offset + 1], data[offset + 2], data[offset + 3],
|
||||
]);
|
||||
let position = read_f32x3(data, offset + 4);
|
||||
let rotation = read_f32x3(data, offset + 16);
|
||||
let velocity = read_f32x3(data, offset + 28);
|
||||
EntityState { id, position, rotation, velocity }
|
||||
}
|
||||
|
||||
/// Serialize a snapshot into compact binary format.
|
||||
/// Layout: tick(4 LE) + entity_count(4 LE) + entities...
|
||||
pub fn serialize_snapshot(snapshot: &Snapshot) -> Vec<u8> {
|
||||
let count = snapshot.entities.len() as u32;
|
||||
let mut buf = Vec::with_capacity(8 + ENTITY_SIZE * snapshot.entities.len());
|
||||
buf.extend_from_slice(&snapshot.tick.to_le_bytes());
|
||||
buf.extend_from_slice(&count.to_le_bytes());
|
||||
for e in &snapshot.entities {
|
||||
serialize_entity(&mut buf, e);
|
||||
}
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize a snapshot from binary data.
|
||||
pub fn deserialize_snapshot(data: &[u8]) -> Result<Snapshot, String> {
|
||||
if data.len() < 8 {
|
||||
return Err("Snapshot data too short for header".to_string());
|
||||
}
|
||||
let tick = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
|
||||
let count = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
|
||||
|
||||
let expected = 8 + count * ENTITY_SIZE;
|
||||
if data.len() < expected {
|
||||
return Err(format!(
|
||||
"Snapshot data too short: expected {} bytes, got {}",
|
||||
expected,
|
||||
data.len()
|
||||
));
|
||||
}
|
||||
|
||||
let mut entities = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
entities.push(deserialize_entity(data, 8 + i * ENTITY_SIZE));
|
||||
}
|
||||
|
||||
Ok(Snapshot { tick, entities })
|
||||
}
|
||||
|
||||
/// Compute a delta between two snapshots.
/// Format: new_tick(4) + count(4) + [id(4) + flags(1) + changed_fields...]
/// Flags bitmask: 0x01 = position, 0x02 = rotation, 0x04 = velocity, 0x80 = new entity (full)
///
/// NOTE(review): only entities present in `new` are encoded; an entity that
/// exists in `old` but not in `new` produces no entry, so `apply_diff` will
/// keep it alive — confirm entity removal is signalled elsewhere.
pub fn diff_snapshots(old: &Snapshot, new: &Snapshot) -> Vec<u8> {
    use std::collections::HashMap;

    // Index the old snapshot by id for O(1) matching.
    let old_map: HashMap<u32, &EntityState> = old.entities.iter().map(|e| (e.id, e)).collect();

    let mut entries: Vec<u8> = Vec::new();
    let mut count: u32 = 0;

    for new_ent in &new.entities {
        if let Some(old_ent) = old_map.get(&new_ent.id) {
            // Existing entity: emit only the fields that actually changed.
            let mut flags: u8 = 0;
            let mut fields = Vec::new();

            if new_ent.position != old_ent.position {
                flags |= 0x01;
                write_f32x3(&mut fields, &new_ent.position);
            }
            if new_ent.rotation != old_ent.rotation {
                flags |= 0x02;
                write_f32x3(&mut fields, &new_ent.rotation);
            }
            if new_ent.velocity != old_ent.velocity {
                flags |= 0x04;
                write_f32x3(&mut fields, &new_ent.velocity);
            }

            // flags == 0 means nothing changed: the entity is skipped entirely.
            if flags != 0 {
                entries.extend_from_slice(&new_ent.id.to_le_bytes());
                entries.push(flags);
                entries.extend_from_slice(&fields);
                count += 1;
            }
        } else {
            // New entity — send full state
            entries.extend_from_slice(&new_ent.id.to_le_bytes());
            entries.push(0x80); // "new entity" flag
            write_f32x3(&mut entries, &new_ent.position);
            write_f32x3(&mut entries, &new_ent.rotation);
            write_f32x3(&mut entries, &new_ent.velocity);
            count += 1;
        }
    }

    // Prepend the header now that the entry count is known.
    let mut buf = Vec::with_capacity(8 + entries.len());
    buf.extend_from_slice(&new.tick.to_le_bytes());
    buf.extend_from_slice(&count.to_le_bytes());
    buf.extend_from_slice(&entries);
    buf
}
|
||||
|
||||
/// Apply a delta to a base snapshot to produce an updated snapshot.
///
/// The diff layout must match `diff_snapshots`. Entries are parsed in order
/// and `offset` advances past each variable-length record.
///
/// # Errors
/// Returns `Err` when the diff is truncated at any point or a delta entry
/// references an entity id not present in `base`.
pub fn apply_diff(base: &Snapshot, diff: &[u8]) -> Result<Snapshot, String> {
    if diff.len() < 8 {
        return Err("Diff data too short for header".to_string());
    }

    let tick = u32::from_le_bytes([diff[0], diff[1], diff[2], diff[3]]);
    let count = u32::from_le_bytes([diff[4], diff[5], diff[6], diff[7]]) as usize;

    // Start from a clone of the base
    let mut entities: Vec<EntityState> = base.entities.clone();
    let mut offset = 8;

    for _ in 0..count {
        // Every entry starts with id(4) + flags(1).
        if offset + 5 > diff.len() {
            return Err("Diff truncated at entry header".to_string());
        }
        let id = u32::from_le_bytes([diff[offset], diff[offset + 1], diff[offset + 2], diff[offset + 3]]);
        let flags = diff[offset + 4];
        offset += 5;

        if flags & 0x80 != 0 {
            // New entity — full state
            if offset + 36 > diff.len() {
                return Err("Diff truncated at new entity data".to_string());
            }
            let position = read_f32x3(diff, offset);
            let rotation = read_f32x3(diff, offset + 12);
            let velocity = read_f32x3(diff, offset + 24);
            offset += 36;

            // Add or replace (linear scan; entity counts are assumed small)
            if let Some(ent) = entities.iter_mut().find(|e| e.id == id) {
                ent.position = position;
                ent.rotation = rotation;
                ent.velocity = velocity;
            } else {
                entities.push(EntityState { id, position, rotation, velocity });
            }
        } else {
            // Delta update — find existing entity
            let ent = entities.iter_mut().find(|e| e.id == id)
                .ok_or_else(|| format!("Diff references unknown entity {}", id))?;

            // Field order on the wire is fixed: position, rotation, velocity.
            if flags & 0x01 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at position".to_string());
                }
                ent.position = read_f32x3(diff, offset);
                offset += 12;
            }
            if flags & 0x02 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at rotation".to_string());
                }
                ent.rotation = read_f32x3(diff, offset);
                offset += 12;
            }
            if flags & 0x04 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at velocity".to_string());
                }
                ent.velocity = read_f32x3(diff, offset);
                offset += 12;
            }
        }
    }

    Ok(Snapshot { tick, entities })
}
|
||||
|
||||
// Unit tests for snapshot serialization and delta compression.
#[cfg(test)]
mod tests {
    use super::*;

    // Helper: entity with the given id/position, zeroed rotation/velocity.
    fn make_entity(id: u32, px: f32, py: f32, pz: f32) -> EntityState {
        EntityState {
            id,
            position: [px, py, pz],
            rotation: [0.0, 0.0, 0.0],
            velocity: [0.0, 0.0, 0.0],
        }
    }

    #[test]
    fn test_snapshot_roundtrip() {
        let snap = Snapshot {
            tick: 42,
            entities: vec![
                make_entity(1, 1.0, 2.0, 3.0),
                make_entity(2, 4.0, 5.0, 6.0),
            ],
        };

        let bytes = serialize_snapshot(&snap);
        let decoded = deserialize_snapshot(&bytes).expect("deserialize failed");
        assert_eq!(snap, decoded);
    }

    #[test]
    fn test_snapshot_empty() {
        let snap = Snapshot { tick: 0, entities: vec![] };
        let bytes = serialize_snapshot(&snap);
        assert_eq!(bytes.len(), 8); // just header
        let decoded = deserialize_snapshot(&bytes).unwrap();
        assert_eq!(snap, decoded);
    }

    #[test]
    fn test_diff_no_changes() {
        let snap = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 1.0, 2.0, 3.0)],
        };
        let snap2 = Snapshot {
            tick: 11,
            entities: vec![make_entity(1, 1.0, 2.0, 3.0)],
        };

        let diff = diff_snapshots(&snap, &snap2);
        // Header only: tick(4) + count(4) = 8, count = 0
        assert_eq!(diff.len(), 8);
        let count = u32::from_le_bytes([diff[4], diff[5], diff[6], diff[7]]);
        assert_eq!(count, 0);
    }

    #[test]
    fn test_diff_position_changed() {
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 2.0, 3.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };

        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).expect("apply_diff failed");

        assert_eq!(result.tick, 11);
        assert_eq!(result.entities.len(), 1);
        assert_eq!(result.entities[0].position, [1.0, 2.0, 3.0]);
        assert_eq!(result.entities[0].rotation, [0.0, 0.0, 0.0]); // unchanged
    }

    #[test]
    fn test_diff_new_entity() {
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![
                make_entity(1, 0.0, 0.0, 0.0),
                make_entity(2, 5.0, 6.0, 7.0),
            ],
        };

        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).expect("apply_diff failed");

        assert_eq!(result.entities.len(), 2);
        assert_eq!(result.entities[1].id, 2);
        assert_eq!(result.entities[1].position, [5.0, 6.0, 7.0]);
    }

    #[test]
    fn test_diff_multiple_fields_changed() {
        let old = Snapshot {
            tick: 10,
            entities: vec![EntityState {
                id: 1,
                position: [0.0, 0.0, 0.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 1.0, 1.0],
                rotation: [2.0, 2.0, 2.0],
                velocity: [3.0, 3.0, 3.0],
            }],
        };

        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).unwrap();

        assert_eq!(result.entities[0].position, [1.0, 1.0, 1.0]);
        assert_eq!(result.entities[0].rotation, [2.0, 2.0, 2.0]);
        assert_eq!(result.entities[0].velocity, [3.0, 3.0, 3.0]);
    }

    #[test]
    fn test_diff_is_compact() {
        // Only position changes — diff should be smaller than full snapshot
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 2.0, 3.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };

        let full_bytes = serialize_snapshot(&new);
        let diff_bytes = diff_snapshots(&old, &new);
        assert!(
            diff_bytes.len() < full_bytes.len(),
            "Diff ({} bytes) should be smaller than full snapshot ({} bytes)",
            diff_bytes.len(),
            full_bytes.len()
        );
    }
}
|
||||
@@ -1,5 +1,6 @@
|
||||
use voltex_ecs::Entity;
|
||||
use voltex_math::AABB;
|
||||
use voltex_math::{AABB, Ray};
|
||||
use crate::ray::ray_vs_aabb;
|
||||
|
||||
#[derive(Debug)]
|
||||
enum BvhNode {
|
||||
@@ -72,34 +73,131 @@ impl BvhTree {
|
||||
idx
|
||||
}
|
||||
|
||||
/// Query all overlapping pairs using recursive tree traversal (replaces N² brute force).
|
||||
pub fn query_pairs(&self) -> Vec<(Entity, Entity)> {
|
||||
let mut pairs = Vec::new();
|
||||
if self.nodes.is_empty() {
|
||||
return pairs;
|
||||
}
|
||||
let root = self.nodes.len() - 1;
|
||||
let mut leaves = Vec::new();
|
||||
self.collect_leaves(root, &mut leaves);
|
||||
for i in 0..leaves.len() {
|
||||
for j in (i + 1)..leaves.len() {
|
||||
let (ea, aabb_a) = leaves[i];
|
||||
let (eb, aabb_b) = leaves[j];
|
||||
if aabb_a.intersects(&aabb_b) {
|
||||
pairs.push((ea, eb));
|
||||
}
|
||||
}
|
||||
}
|
||||
self.query_pairs_recursive(root, root, &mut pairs);
|
||||
pairs
|
||||
}
|
||||
|
||||
fn collect_leaves(&self, node_idx: usize, out: &mut Vec<(Entity, AABB)>) {
|
||||
match &self.nodes[node_idx] {
|
||||
fn query_pairs_recursive(&self, a: usize, b: usize, pairs: &mut Vec<(Entity, Entity)>) {
|
||||
let aabb_a = self.node_aabb(a);
|
||||
let aabb_b = self.node_aabb(b);
|
||||
|
||||
if !aabb_a.intersects(&aabb_b) {
|
||||
return;
|
||||
}
|
||||
|
||||
match (&self.nodes[a], &self.nodes[b]) {
|
||||
(BvhNode::Leaf { entity: ea, aabb: aabb_a }, BvhNode::Leaf { entity: eb, aabb: aabb_b }) => {
|
||||
if a != b && ea.id <= eb.id && aabb_a.intersects(aabb_b) {
|
||||
pairs.push((*ea, *eb));
|
||||
}
|
||||
}
|
||||
(BvhNode::Leaf { .. }, BvhNode::Internal { left, right, .. }) => {
|
||||
self.query_pairs_recursive(a, *left, pairs);
|
||||
self.query_pairs_recursive(a, *right, pairs);
|
||||
}
|
||||
(BvhNode::Internal { left, right, .. }, BvhNode::Leaf { .. }) => {
|
||||
self.query_pairs_recursive(*left, b, pairs);
|
||||
self.query_pairs_recursive(*right, b, pairs);
|
||||
}
|
||||
(BvhNode::Internal { left: la, right: ra, .. }, BvhNode::Internal { left: lb, right: rb, .. }) => {
|
||||
if a == b {
|
||||
// Same node: check children against each other and themselves
|
||||
let la = *la;
|
||||
let ra = *ra;
|
||||
self.query_pairs_recursive(la, la, pairs);
|
||||
self.query_pairs_recursive(ra, ra, pairs);
|
||||
self.query_pairs_recursive(la, ra, pairs);
|
||||
} else {
|
||||
let la = *la;
|
||||
let ra = *ra;
|
||||
let lb = *lb;
|
||||
let rb = *rb;
|
||||
self.query_pairs_recursive(la, lb, pairs);
|
||||
self.query_pairs_recursive(la, rb, pairs);
|
||||
self.query_pairs_recursive(ra, lb, pairs);
|
||||
self.query_pairs_recursive(ra, rb, pairs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn node_aabb(&self, idx: usize) -> &AABB {
|
||||
match &self.nodes[idx] {
|
||||
BvhNode::Leaf { aabb, .. } => aabb,
|
||||
BvhNode::Internal { aabb, .. } => aabb,
|
||||
}
|
||||
}
|
||||
|
||||
/// Query ray against BVH, returning all (Entity, t) hits sorted by t.
|
||||
pub fn query_ray(&self, ray: &Ray, max_t: f32) -> Vec<(Entity, f32)> {
|
||||
let mut hits = Vec::new();
|
||||
if self.nodes.is_empty() {
|
||||
return hits;
|
||||
}
|
||||
let root = self.nodes.len() - 1;
|
||||
self.query_ray_recursive(root, ray, max_t, &mut hits);
|
||||
hits.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
|
||||
hits
|
||||
}
|
||||
|
||||
fn query_ray_recursive(&self, idx: usize, ray: &Ray, max_t: f32, hits: &mut Vec<(Entity, f32)>) {
|
||||
let aabb = self.node_aabb(idx);
|
||||
match ray_vs_aabb(ray, aabb) {
|
||||
Some(t) if t <= max_t => {}
|
||||
_ => return,
|
||||
}
|
||||
|
||||
match &self.nodes[idx] {
|
||||
BvhNode::Leaf { entity, aabb } => {
|
||||
out.push((*entity, *aabb));
|
||||
if let Some(t) = ray_vs_aabb(ray, aabb) {
|
||||
if t <= max_t {
|
||||
hits.push((*entity, t));
|
||||
}
|
||||
}
|
||||
}
|
||||
BvhNode::Internal { left, right, .. } => {
|
||||
self.collect_leaves(*left, out);
|
||||
self.collect_leaves(*right, out);
|
||||
self.query_ray_recursive(*left, ray, max_t, hits);
|
||||
self.query_ray_recursive(*right, ray, max_t, hits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Refit the BVH: update leaf AABBs and propagate changes to parents.
|
||||
/// `updated` maps entity → new AABB. Leaves not in map are unchanged.
|
||||
pub fn refit(&mut self, updated: &[(Entity, AABB)]) {
|
||||
if self.nodes.is_empty() {
|
||||
return;
|
||||
}
|
||||
let root = self.nodes.len() - 1;
|
||||
self.refit_recursive(root, updated);
|
||||
}
|
||||
|
||||
fn refit_recursive(&mut self, idx: usize, updated: &[(Entity, AABB)]) -> AABB {
|
||||
match self.nodes[idx] {
|
||||
BvhNode::Leaf { entity, ref mut aabb } => {
|
||||
if let Some((_, new_aabb)) = updated.iter().find(|(e, _)| *e == entity) {
|
||||
*aabb = *new_aabb;
|
||||
}
|
||||
*aabb
|
||||
}
|
||||
BvhNode::Internal { left, right, aabb: _ } => {
|
||||
let left = left;
|
||||
let right = right;
|
||||
let left_aabb = self.refit_recursive(left, updated);
|
||||
let right_aabb = self.refit_recursive(right, updated);
|
||||
let new_aabb = left_aabb.merged(&right_aabb);
|
||||
// Update the internal node's AABB
|
||||
if let BvhNode::Internal { ref mut aabb, .. } = self.nodes[idx] {
|
||||
*aabb = new_aabb;
|
||||
}
|
||||
new_aabb
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -163,4 +261,117 @@ mod tests {
|
||||
let (a, b) = pairs[0];
|
||||
assert!((a.id == 0 && b.id == 1) || (a.id == 1 && b.id == 0));
|
||||
}
|
||||
|
||||
// --- query_pairs: verify recursive gives same results as brute force ---
|
||||
|
||||
#[test]
|
||||
fn test_query_pairs_matches_brute_force() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
|
||||
(make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
|
||||
(make_entity(2), AABB::new(Vec3::new(2.5, 2.5, 2.5), Vec3::new(4.0, 4.0, 4.0))),
|
||||
(make_entity(3), AABB::new(Vec3::new(10.0, 10.0, 10.0), Vec3::new(11.0, 11.0, 11.0))),
|
||||
(make_entity(4), AABB::new(Vec3::new(10.5, 10.5, 10.5), Vec3::new(12.0, 12.0, 12.0))),
|
||||
];
|
||||
|
||||
let tree = BvhTree::build(&entries);
|
||||
let mut pairs = tree.query_pairs();
|
||||
pairs.sort_by_key(|(a, b)| (a.id.min(b.id), a.id.max(b.id)));
|
||||
|
||||
// Brute force
|
||||
let mut brute: Vec<(Entity, Entity)> = Vec::new();
|
||||
for i in 0..entries.len() {
|
||||
for j in (i + 1)..entries.len() {
|
||||
if entries[i].1.intersects(&entries[j].1) {
|
||||
let a = entries[i].0;
|
||||
let b = entries[j].0;
|
||||
brute.push(if a.id <= b.id { (a, b) } else { (b, a) });
|
||||
}
|
||||
}
|
||||
}
|
||||
brute.sort_by_key(|(a, b)| (a.id, b.id));
|
||||
|
||||
assert_eq!(pairs.len(), brute.len(), "pair count mismatch: tree={}, brute={}", pairs.len(), brute.len());
|
||||
for (tree_pair, brute_pair) in pairs.iter().zip(brute.iter()) {
|
||||
let t = (tree_pair.0.id.min(tree_pair.1.id), tree_pair.0.id.max(tree_pair.1.id));
|
||||
let b = (brute_pair.0.id, brute_pair.1.id);
|
||||
assert_eq!(t, b);
|
||||
}
|
||||
}
|
||||
|
||||
// --- query_ray tests ---
|
||||
|
||||
#[test]
|
||||
fn test_query_ray_basic() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0))),
|
||||
(make_entity(1), AABB::new(Vec3::new(10.0, 10.0, 10.0), Vec3::new(11.0, 11.0, 11.0))),
|
||||
];
|
||||
let tree = BvhTree::build(&entries);
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = tree.query_ray(&ray, 100.0);
|
||||
assert_eq!(hits.len(), 1);
|
||||
assert_eq!(hits[0].0.id, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query_ray_multiple() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::new(2.0, -1.0, -1.0), Vec3::new(4.0, 1.0, 1.0))),
|
||||
(make_entity(1), AABB::new(Vec3::new(6.0, -1.0, -1.0), Vec3::new(8.0, 1.0, 1.0))),
|
||||
];
|
||||
let tree = BvhTree::build(&entries);
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = tree.query_ray(&ray, 100.0);
|
||||
assert_eq!(hits.len(), 2);
|
||||
assert!(hits[0].1 < hits[1].1, "should be sorted by distance");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query_ray_miss() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::new(4.0, 4.0, 4.0), Vec3::new(6.0, 6.0, 6.0))),
|
||||
];
|
||||
let tree = BvhTree::build(&entries);
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = tree.query_ray(&ray, 100.0);
|
||||
assert!(hits.is_empty());
|
||||
}
|
||||
|
||||
// --- refit tests ---
|
||||
|
||||
#[test]
|
||||
fn test_refit_updates_leaf() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::ONE)),
|
||||
(make_entity(1), AABB::new(Vec3::new(5.0, 5.0, 5.0), Vec3::new(6.0, 6.0, 6.0))),
|
||||
];
|
||||
let mut tree = BvhTree::build(&entries);
|
||||
|
||||
// Initially separated
|
||||
assert!(tree.query_pairs().is_empty());
|
||||
|
||||
// Move entity 1 to overlap entity 0
|
||||
tree.refit(&[(make_entity(1), AABB::new(Vec3::new(0.5, 0.5, 0.5), Vec3::new(1.5, 1.5, 1.5)))]);
|
||||
|
||||
let pairs = tree.query_pairs();
|
||||
assert_eq!(pairs.len(), 1, "after refit, overlapping entities should be found");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_refit_separates_entities() {
|
||||
let entries = vec![
|
||||
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
|
||||
(make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
|
||||
];
|
||||
let mut tree = BvhTree::build(&entries);
|
||||
|
||||
// Initially overlapping
|
||||
assert_eq!(tree.query_pairs().len(), 1);
|
||||
|
||||
// Move entity 1 far away
|
||||
tree.refit(&[(make_entity(1), AABB::new(Vec3::new(100.0, 100.0, 100.0), Vec3::new(101.0, 101.0, 101.0)))]);
|
||||
|
||||
assert!(tree.query_pairs().is_empty(), "after refit, separated entities should not overlap");
|
||||
}
|
||||
}
|
||||
|
||||
118
crates/voltex_physics/src/ccd.rs
Normal file
118
crates/voltex_physics/src/ccd.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use voltex_math::{Vec3, AABB, Ray};
|
||||
use crate::ray::ray_vs_aabb;
|
||||
|
||||
/// Swept sphere vs AABB continuous collision detection.
|
||||
/// Expands the AABB by the sphere radius, then tests a ray from start to end.
|
||||
/// Returns t in [0,1] of first contact, or None if no contact.
|
||||
pub fn swept_sphere_vs_aabb(start: Vec3, end: Vec3, radius: f32, aabb: &AABB) -> Option<f32> {
|
||||
// Expand AABB by sphere radius
|
||||
let r = Vec3::new(radius, radius, radius);
|
||||
let expanded = AABB::new(aabb.min - r, aabb.max + r);
|
||||
|
||||
let direction = end - start;
|
||||
let sweep_len = direction.length();
|
||||
|
||||
if sweep_len < 1e-10 {
|
||||
// No movement — check if already inside
|
||||
if expanded.contains_point(start) {
|
||||
return Some(0.0);
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
let ray = Ray::new(start, direction * (1.0 / sweep_len));
|
||||
|
||||
match ray_vs_aabb(&ray, &expanded) {
|
||||
Some(t) => {
|
||||
let parametric_t = t / sweep_len;
|
||||
if parametric_t <= 1.0 {
|
||||
Some(parametric_t)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn approx(a: f32, b: f32) -> bool {
|
||||
(a - b).abs() < 1e-3
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_hits_aabb() {
|
||||
let start = Vec3::new(-10.0, 0.0, 0.0);
|
||||
let end = Vec3::new(10.0, 0.0, 0.0);
|
||||
let radius = 0.5;
|
||||
let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
|
||||
|
||||
let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
|
||||
// Expanded AABB min.x = 3.5, start.x = -10, direction = 20
|
||||
// t = (3.5 - (-10)) / 20 = 13.5 / 20 = 0.675
|
||||
assert!(t > 0.0 && t < 1.0);
|
||||
assert!(approx(t, 0.675));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_misses_aabb() {
|
||||
let start = Vec3::new(-10.0, 10.0, 0.0);
|
||||
let end = Vec3::new(10.0, 10.0, 0.0);
|
||||
let radius = 0.5;
|
||||
let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
|
||||
|
||||
assert!(swept_sphere_vs_aabb(start, end, radius, &aabb).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_starts_inside() {
|
||||
let start = Vec3::new(5.0, 0.0, 0.0);
|
||||
let end = Vec3::new(10.0, 0.0, 0.0);
|
||||
let radius = 0.5;
|
||||
let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
|
||||
|
||||
let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
|
||||
assert!(approx(t, 0.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_tunneling_detection() {
|
||||
// Fast sphere that would tunnel through a thin wall
|
||||
let start = Vec3::new(-100.0, 0.0, 0.0);
|
||||
let end = Vec3::new(100.0, 0.0, 0.0);
|
||||
let radius = 0.1;
|
||||
// Thin wall at x=0
|
||||
let aabb = AABB::new(Vec3::new(-0.05, -10.0, -10.0), Vec3::new(0.05, 10.0, 10.0));
|
||||
|
||||
let t = swept_sphere_vs_aabb(start, end, radius, &aabb);
|
||||
assert!(t.is_some(), "should detect tunneling through thin wall");
|
||||
let t = t.unwrap();
|
||||
assert!(t > 0.0 && t < 1.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_no_movement() {
|
||||
let start = Vec3::new(5.0, 0.0, 0.0);
|
||||
let end = Vec3::new(5.0, 0.0, 0.0);
|
||||
let radius = 0.5;
|
||||
let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
|
||||
|
||||
// Inside expanded AABB, so should return 0
|
||||
let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
|
||||
assert!(approx(t, 0.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_swept_sphere_beyond_range() {
|
||||
let start = Vec3::new(-10.0, 0.0, 0.0);
|
||||
let end = Vec3::new(-5.0, 0.0, 0.0);
|
||||
let radius = 0.5;
|
||||
let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
|
||||
|
||||
// AABB is at x=4..6, moving from -10 to -5 won't reach it
|
||||
assert!(swept_sphere_vs_aabb(start, end, radius, &aabb).is_none());
|
||||
}
|
||||
}
|
||||
@@ -1,28 +1,102 @@
|
||||
use voltex_ecs::World;
|
||||
use voltex_ecs::Transform;
|
||||
use voltex_math::Vec3;
|
||||
use crate::rigid_body::{RigidBody, PhysicsConfig};
|
||||
use crate::collider::Collider;
|
||||
use crate::rigid_body::{RigidBody, PhysicsConfig, SLEEP_VELOCITY_THRESHOLD, SLEEP_TIME_THRESHOLD};
|
||||
|
||||
/// Compute diagonal inertia tensor for a collider shape.
|
||||
/// Returns Vec3 where each component is the moment of inertia about that axis.
|
||||
pub fn inertia_tensor(collider: &Collider, mass: f32) -> Vec3 {
|
||||
match collider {
|
||||
Collider::Sphere { radius } => {
|
||||
let i = (2.0 / 5.0) * mass * radius * radius;
|
||||
Vec3::new(i, i, i)
|
||||
}
|
||||
Collider::Box { half_extents } => {
|
||||
let w = half_extents.x * 2.0;
|
||||
let h = half_extents.y * 2.0;
|
||||
let d = half_extents.z * 2.0;
|
||||
let factor = mass / 12.0;
|
||||
Vec3::new(
|
||||
factor * (h * h + d * d),
|
||||
factor * (w * w + d * d),
|
||||
factor * (w * w + h * h),
|
||||
)
|
||||
}
|
||||
Collider::Capsule { radius, half_height } => {
|
||||
// Approximate as cylinder with total height = 2*half_height + 2*radius
|
||||
let r = *radius;
|
||||
let h = half_height * 2.0;
|
||||
let ix = mass * (3.0 * r * r + h * h) / 12.0;
|
||||
let iy = mass * r * r / 2.0;
|
||||
let iz = ix;
|
||||
Vec3::new(ix, iy, iz)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute inverse inertia (component-wise 1/I). Returns zero for zero-mass or zero-inertia.
|
||||
pub fn inv_inertia(inertia: Vec3) -> Vec3 {
|
||||
Vec3::new(
|
||||
if inertia.x > 1e-10 { 1.0 / inertia.x } else { 0.0 },
|
||||
if inertia.y > 1e-10 { 1.0 / inertia.y } else { 0.0 },
|
||||
if inertia.z > 1e-10 { 1.0 / inertia.z } else { 0.0 },
|
||||
)
|
||||
}
|
||||
|
||||
pub fn integrate(world: &mut World, config: &PhysicsConfig) {
|
||||
// 1. Collect
|
||||
let updates: Vec<(voltex_ecs::Entity, Vec3, Vec3)> = world
|
||||
// 1. Collect linear + angular updates
|
||||
let updates: Vec<(voltex_ecs::Entity, Vec3, Vec3, Vec3)> = world
|
||||
.query2::<Transform, RigidBody>()
|
||||
.into_iter()
|
||||
.filter(|(_, _, rb)| !rb.is_static())
|
||||
.filter(|(_, _, rb)| !rb.is_static() && !rb.is_sleeping)
|
||||
.map(|(entity, transform, rb)| {
|
||||
let new_velocity = rb.velocity + config.gravity * rb.gravity_scale * config.fixed_dt;
|
||||
let new_position = transform.position + new_velocity * config.fixed_dt;
|
||||
(entity, new_velocity, new_position)
|
||||
let new_rotation = transform.rotation + rb.angular_velocity * config.fixed_dt;
|
||||
(entity, new_velocity, new_position, new_rotation)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// 2. Apply
|
||||
for (entity, new_velocity, new_position) in updates {
|
||||
for (entity, new_velocity, new_position, new_rotation) in updates {
|
||||
if let Some(rb) = world.get_mut::<RigidBody>(entity) {
|
||||
rb.velocity = new_velocity;
|
||||
}
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position = new_position;
|
||||
t.rotation = new_rotation;
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Update sleep timers
|
||||
update_sleep_timers(world, config);
|
||||
}
|
||||
|
||||
fn update_sleep_timers(world: &mut World, config: &PhysicsConfig) {
|
||||
let sleep_updates: Vec<(voltex_ecs::Entity, bool, f32)> = world
|
||||
.query::<RigidBody>()
|
||||
.filter(|(_, rb)| !rb.is_static())
|
||||
.map(|(entity, rb)| {
|
||||
let speed = rb.velocity.length() + rb.angular_velocity.length();
|
||||
if speed < SLEEP_VELOCITY_THRESHOLD {
|
||||
let new_timer = rb.sleep_timer + config.fixed_dt;
|
||||
let should_sleep = new_timer >= SLEEP_TIME_THRESHOLD;
|
||||
(entity, should_sleep, new_timer)
|
||||
} else {
|
||||
(entity, false, 0.0)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
for (entity, should_sleep, timer) in sleep_updates {
|
||||
if let Some(rb) = world.get_mut::<RigidBody>(entity) {
|
||||
rb.sleep_timer = timer;
|
||||
if should_sleep {
|
||||
rb.is_sleeping = true;
|
||||
rb.velocity = Vec3::ZERO;
|
||||
rb.angular_velocity = Vec3::ZERO;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -33,7 +107,7 @@ mod tests {
|
||||
use voltex_ecs::World;
|
||||
use voltex_ecs::Transform;
|
||||
use voltex_math::Vec3;
|
||||
use crate::RigidBody;
|
||||
use crate::{RigidBody, Collider};
|
||||
|
||||
fn approx(a: f32, b: f32) -> bool {
|
||||
(a - b).abs() < 1e-4
|
||||
@@ -93,4 +167,146 @@ mod tests {
|
||||
let expected_x = 5.0 * config.fixed_dt;
|
||||
assert!(approx(t.position.x, expected_x));
|
||||
}
|
||||
|
||||
// --- Inertia tensor tests ---
|
||||
|
||||
#[test]
|
||||
fn test_inertia_tensor_sphere() {
|
||||
let c = Collider::Sphere { radius: 1.0 };
|
||||
let i = inertia_tensor(&c, 1.0);
|
||||
let expected = 2.0 / 5.0;
|
||||
assert!(approx(i.x, expected));
|
||||
assert!(approx(i.y, expected));
|
||||
assert!(approx(i.z, expected));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inertia_tensor_box() {
|
||||
let c = Collider::Box { half_extents: Vec3::ONE };
|
||||
let i = inertia_tensor(&c, 12.0);
|
||||
// w=2, h=2, d=2, factor=1 => ix = 4+4=8, etc
|
||||
assert!(approx(i.x, 8.0));
|
||||
assert!(approx(i.y, 8.0));
|
||||
assert!(approx(i.z, 8.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inertia_tensor_capsule() {
|
||||
let c = Collider::Capsule { radius: 0.5, half_height: 1.0 };
|
||||
let i = inertia_tensor(&c, 1.0);
|
||||
// Approximate as cylinder: r=0.5, h=2.0
|
||||
// ix = m*(3*r^2 + h^2)/12 = (3*0.25 + 4)/12 = 4.75/12
|
||||
assert!(approx(i.x, 4.75 / 12.0));
|
||||
// iy = m*r^2/2 = 0.25/2 = 0.125
|
||||
assert!(approx(i.y, 0.125));
|
||||
}
|
||||
|
||||
// --- Angular velocity integration tests ---
|
||||
|
||||
#[test]
|
||||
fn test_spinning_sphere() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::ZERO));
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.angular_velocity = Vec3::new(0.0, 3.14159, 0.0); // ~PI rad/s around Y
|
||||
rb.gravity_scale = 0.0;
|
||||
world.add(e, rb);
|
||||
|
||||
let config = PhysicsConfig::default();
|
||||
integrate(&mut world, &config);
|
||||
|
||||
let t = world.get::<Transform>(e).unwrap();
|
||||
let expected_rot_y = 3.14159 * config.fixed_dt;
|
||||
assert!(approx(t.rotation.y, expected_rot_y));
|
||||
// Position should not change (no linear velocity, no gravity)
|
||||
assert!(approx(t.position.x, 0.0));
|
||||
assert!(approx(t.position.y, 0.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_angular_velocity_persists() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::ZERO));
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.angular_velocity = Vec3::new(1.0, 0.0, 0.0);
|
||||
rb.gravity_scale = 0.0;
|
||||
world.add(e, rb);
|
||||
|
||||
let config = PhysicsConfig::default();
|
||||
integrate(&mut world, &config);
|
||||
integrate(&mut world, &config);
|
||||
|
||||
let t = world.get::<Transform>(e).unwrap();
|
||||
let expected_rot_x = 2.0 * config.fixed_dt;
|
||||
assert!(approx(t.rotation.x, expected_rot_x));
|
||||
}
|
||||
|
||||
// --- Sleep system tests ---
|
||||
|
||||
#[test]
|
||||
fn test_body_sleeps_after_resting() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::ZERO));
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.velocity = Vec3::ZERO;
|
||||
rb.gravity_scale = 0.0;
|
||||
world.add(e, rb);
|
||||
|
||||
let config = PhysicsConfig {
|
||||
gravity: Vec3::ZERO,
|
||||
fixed_dt: 1.0 / 60.0,
|
||||
solver_iterations: 4,
|
||||
};
|
||||
|
||||
// Integrate many times until sleep timer exceeds threshold
|
||||
for _ in 0..60 {
|
||||
integrate(&mut world, &config);
|
||||
}
|
||||
|
||||
let rb = world.get::<RigidBody>(e).unwrap();
|
||||
assert!(rb.is_sleeping, "body should be sleeping after resting");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sleeping_body_not_integrated() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(0.0, 10.0, 0.0)));
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.is_sleeping = true;
|
||||
world.add(e, rb);
|
||||
|
||||
let config = PhysicsConfig::default();
|
||||
integrate(&mut world, &config);
|
||||
|
||||
let t = world.get::<Transform>(e).unwrap();
|
||||
assert!(approx(t.position.y, 10.0), "sleeping body should not move");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_moving_body_does_not_sleep() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::ZERO));
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.velocity = Vec3::new(5.0, 0.0, 0.0);
|
||||
rb.gravity_scale = 0.0;
|
||||
world.add(e, rb);
|
||||
|
||||
let config = PhysicsConfig {
|
||||
gravity: Vec3::ZERO,
|
||||
fixed_dt: 1.0 / 60.0,
|
||||
solver_iterations: 4,
|
||||
};
|
||||
|
||||
for _ in 0..60 {
|
||||
integrate(&mut world, &config);
|
||||
}
|
||||
|
||||
let rb = world.get::<RigidBody>(e).unwrap();
|
||||
assert!(!rb.is_sleeping, "fast-moving body should not sleep");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,12 +9,15 @@ pub mod rigid_body;
|
||||
pub mod integrator;
|
||||
pub mod solver;
|
||||
pub mod raycast;
|
||||
pub mod ccd;
|
||||
|
||||
pub use bvh::BvhTree;
|
||||
pub use collider::Collider;
|
||||
pub use contact::ContactPoint;
|
||||
pub use collision::detect_collisions;
|
||||
pub use rigid_body::{RigidBody, PhysicsConfig};
|
||||
pub use integrator::integrate;
|
||||
pub use integrator::{integrate, inertia_tensor, inv_inertia};
|
||||
pub use solver::{resolve_collisions, physics_step};
|
||||
pub use raycast::{RayHit, raycast};
|
||||
pub use raycast::{RayHit, raycast, raycast_all};
|
||||
pub use ray::ray_vs_triangle;
|
||||
pub use ccd::swept_sphere_vs_aabb;
|
||||
|
||||
@@ -183,6 +183,43 @@ pub fn ray_vs_capsule(ray: &Ray, center: Vec3, radius: f32, half_height: f32) ->
|
||||
best
|
||||
}
|
||||
|
||||
/// Ray vs Triangle (Möller–Trumbore algorithm).
|
||||
/// Returns (t, normal) where normal is the triangle face normal.
|
||||
pub fn ray_vs_triangle(ray: &Ray, v0: Vec3, v1: Vec3, v2: Vec3) -> Option<(f32, Vec3)> {
|
||||
let edge1 = v1 - v0;
|
||||
let edge2 = v2 - v0;
|
||||
let h = ray.direction.cross(edge2);
|
||||
let a = edge1.dot(h);
|
||||
|
||||
if a.abs() < 1e-8 {
|
||||
return None; // Ray is parallel to triangle
|
||||
}
|
||||
|
||||
let f = 1.0 / a;
|
||||
let s = ray.origin - v0;
|
||||
let u = f * s.dot(h);
|
||||
|
||||
if u < 0.0 || u > 1.0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let q = s.cross(edge1);
|
||||
let v = f * ray.direction.dot(q);
|
||||
|
||||
if v < 0.0 || u + v > 1.0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let t = f * edge2.dot(q);
|
||||
|
||||
if t < 0.0 {
|
||||
return None; // Triangle is behind the ray
|
||||
}
|
||||
|
||||
let normal = edge1.cross(edge2).normalize();
|
||||
Some((t, normal))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -265,4 +302,61 @@ mod tests {
|
||||
let (t, _normal) = ray_vs_box(&ray, Vec3::ZERO, Vec3::ONE).unwrap();
|
||||
assert!(approx(t, 0.0));
|
||||
}
|
||||
|
||||
// --- ray_vs_triangle tests ---
|
||||
|
||||
#[test]
|
||||
fn test_triangle_hit() {
|
||||
let v0 = Vec3::new(-1.0, -1.0, 5.0);
|
||||
let v1 = Vec3::new(1.0, -1.0, 5.0);
|
||||
let v2 = Vec3::new(0.0, 1.0, 5.0);
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::Z);
|
||||
let (t, normal) = ray_vs_triangle(&ray, v0, v1, v2).unwrap();
|
||||
assert!(approx(t, 5.0));
|
||||
// Normal should point toward -Z (facing the ray)
|
||||
assert!(normal.z.abs() > 0.9);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_triangle_miss() {
|
||||
let v0 = Vec3::new(-1.0, -1.0, 5.0);
|
||||
let v1 = Vec3::new(1.0, -1.0, 5.0);
|
||||
let v2 = Vec3::new(0.0, 1.0, 5.0);
|
||||
// Ray pointing away
|
||||
let ray = Ray::new(Vec3::new(10.0, 10.0, 0.0), Vec3::Z);
|
||||
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_triangle_behind_ray() {
|
||||
let v0 = Vec3::new(-1.0, -1.0, -5.0);
|
||||
let v1 = Vec3::new(1.0, -1.0, -5.0);
|
||||
let v2 = Vec3::new(0.0, 1.0, -5.0);
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::Z);
|
||||
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_triangle_parallel() {
|
||||
let v0 = Vec3::new(0.0, 0.0, 5.0);
|
||||
let v1 = Vec3::new(1.0, 0.0, 5.0);
|
||||
let v2 = Vec3::new(0.0, 0.0, 6.0);
|
||||
// Ray parallel to triangle (in XZ plane)
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_triangle_edge_hit() {
|
||||
// Ray hitting exactly on an edge
|
||||
let v0 = Vec3::new(-1.0, 0.0, 5.0);
|
||||
let v1 = Vec3::new(1.0, 0.0, 5.0);
|
||||
let v2 = Vec3::new(0.0, 2.0, 5.0);
|
||||
// Hit on the midpoint of v0-v1 edge
|
||||
let ray = Ray::new(Vec3::new(0.0, 0.0, 0.0), Vec3::Z);
|
||||
let result = ray_vs_triangle(&ray, v0, v1, v2);
|
||||
assert!(result.is_some());
|
||||
let (t, _) = result.unwrap();
|
||||
assert!(approx(t, 5.0));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,6 +72,58 @@ pub fn raycast(world: &World, ray: &Ray, max_dist: f32) -> Option<RayHit> {
|
||||
closest
|
||||
}
|
||||
|
||||
/// Cast a ray and return ALL hits sorted by distance.
|
||||
pub fn raycast_all(world: &World, ray: &Ray, max_dist: f32) -> Vec<RayHit> {
|
||||
let entities: Vec<(Entity, Vec3, Collider)> = world
|
||||
.query2::<Transform, Collider>()
|
||||
.into_iter()
|
||||
.map(|(e, t, c)| (e, t.position, *c))
|
||||
.collect();
|
||||
|
||||
if entities.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let mut hits = Vec::new();
|
||||
|
||||
for (entity, pos, collider) in &entities {
|
||||
let aabb = collider.aabb(*pos);
|
||||
|
||||
// Broad phase: ray vs AABB
|
||||
match ray_tests::ray_vs_aabb(ray, &aabb) {
|
||||
Some(t) if t <= max_dist => {}
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
// Narrow phase
|
||||
let result = match collider {
|
||||
Collider::Sphere { radius } => {
|
||||
ray_tests::ray_vs_sphere(ray, *pos, *radius)
|
||||
}
|
||||
Collider::Box { half_extents } => {
|
||||
ray_tests::ray_vs_box(ray, *pos, *half_extents)
|
||||
}
|
||||
Collider::Capsule { radius, half_height } => {
|
||||
ray_tests::ray_vs_capsule(ray, *pos, *radius, *half_height)
|
||||
}
|
||||
};
|
||||
|
||||
if let Some((t, normal)) = result {
|
||||
if t <= max_dist {
|
||||
hits.push(RayHit {
|
||||
entity: *entity,
|
||||
t,
|
||||
point: ray.at(t),
|
||||
normal,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hits.sort_by(|a, b| a.t.partial_cmp(&b.t).unwrap());
|
||||
hits
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -150,6 +202,62 @@ mod tests {
|
||||
assert!(approx(hit.t, 4.0));
|
||||
}
|
||||
|
||||
// --- raycast_all tests ---
|
||||
|
||||
#[test]
|
||||
fn test_raycast_all_multiple_hits() {
|
||||
let mut world = World::new();
|
||||
|
||||
let near = world.spawn();
|
||||
world.add(near, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
|
||||
world.add(near, Collider::Sphere { radius: 0.5 });
|
||||
|
||||
let mid = world.spawn();
|
||||
world.add(mid, Transform::from_position(Vec3::new(6.0, 0.0, 0.0)));
|
||||
world.add(mid, Collider::Sphere { radius: 0.5 });
|
||||
|
||||
let far = world.spawn();
|
||||
world.add(far, Transform::from_position(Vec3::new(10.0, 0.0, 0.0)));
|
||||
world.add(far, Collider::Sphere { radius: 0.5 });
|
||||
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = raycast_all(&world, &ray, 100.0);
|
||||
|
||||
assert_eq!(hits.len(), 3);
|
||||
assert_eq!(hits[0].entity, near);
|
||||
assert_eq!(hits[1].entity, mid);
|
||||
assert_eq!(hits[2].entity, far);
|
||||
assert!(hits[0].t < hits[1].t);
|
||||
assert!(hits[1].t < hits[2].t);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_raycast_all_empty() {
|
||||
let world = World::new();
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = raycast_all(&world, &ray, 100.0);
|
||||
assert!(hits.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_raycast_all_max_dist() {
|
||||
let mut world = World::new();
|
||||
|
||||
let near = world.spawn();
|
||||
world.add(near, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
|
||||
world.add(near, Collider::Sphere { radius: 0.5 });
|
||||
|
||||
let far = world.spawn();
|
||||
world.add(far, Transform::from_position(Vec3::new(50.0, 0.0, 0.0)));
|
||||
world.add(far, Collider::Sphere { radius: 0.5 });
|
||||
|
||||
let ray = Ray::new(Vec3::ZERO, Vec3::X);
|
||||
let hits = raycast_all(&world, &ray, 10.0);
|
||||
|
||||
assert_eq!(hits.len(), 1);
|
||||
assert_eq!(hits[0].entity, near);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mixed_sphere_box() {
|
||||
let mut world = World::new();
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use voltex_math::Vec3;
|
||||
|
||||
pub const SLEEP_VELOCITY_THRESHOLD: f32 = 0.01;
|
||||
pub const SLEEP_TIME_THRESHOLD: f32 = 0.5;
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct RigidBody {
|
||||
pub velocity: Vec3,
|
||||
@@ -8,6 +11,8 @@ pub struct RigidBody {
|
||||
pub restitution: f32,
|
||||
pub gravity_scale: f32,
|
||||
pub friction: f32, // Coulomb friction coefficient, default 0.5
|
||||
pub is_sleeping: bool,
|
||||
pub sleep_timer: f32,
|
||||
}
|
||||
|
||||
impl RigidBody {
|
||||
@@ -19,6 +24,8 @@ impl RigidBody {
|
||||
restitution: 0.3,
|
||||
gravity_scale: 1.0,
|
||||
friction: 0.5,
|
||||
is_sleeping: false,
|
||||
sleep_timer: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +37,8 @@ impl RigidBody {
|
||||
restitution: 0.3,
|
||||
gravity_scale: 0.0,
|
||||
friction: 0.5,
|
||||
is_sleeping: false,
|
||||
sleep_timer: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,11 +49,18 @@ impl RigidBody {
|
||||
pub fn is_static(&self) -> bool {
|
||||
self.mass == 0.0
|
||||
}
|
||||
|
||||
/// Wake this body from sleep.
|
||||
pub fn wake(&mut self) {
|
||||
self.is_sleeping = false;
|
||||
self.sleep_timer = 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PhysicsConfig {
|
||||
pub gravity: Vec3,
|
||||
pub fixed_dt: f32,
|
||||
pub solver_iterations: u32,
|
||||
}
|
||||
|
||||
impl Default for PhysicsConfig {
|
||||
@@ -52,6 +68,7 @@ impl Default for PhysicsConfig {
|
||||
Self {
|
||||
gravity: Vec3::new(0.0, -9.81, 0.0),
|
||||
fixed_dt: 1.0 / 60.0,
|
||||
solver_iterations: 4,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -69,6 +86,8 @@ mod tests {
|
||||
assert_eq!(rb.velocity, Vec3::ZERO);
|
||||
assert_eq!(rb.restitution, 0.3);
|
||||
assert_eq!(rb.gravity_scale, 1.0);
|
||||
assert!(!rb.is_sleeping);
|
||||
assert_eq!(rb.sleep_timer, 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -85,5 +104,16 @@ mod tests {
|
||||
let cfg = PhysicsConfig::default();
|
||||
assert!((cfg.gravity.y - (-9.81)).abs() < 1e-6);
|
||||
assert!((cfg.fixed_dt - 1.0 / 60.0).abs() < 1e-6);
|
||||
assert_eq!(cfg.solver_iterations, 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wake() {
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.is_sleeping = true;
|
||||
rb.sleep_timer = 1.0;
|
||||
rb.wake();
|
||||
assert!(!rb.is_sleeping);
|
||||
assert_eq!(rb.sleep_timer, 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,102 +2,271 @@ use voltex_ecs::{World, Entity};
|
||||
use voltex_ecs::Transform;
|
||||
use voltex_math::Vec3;
|
||||
|
||||
use crate::collider::Collider;
|
||||
use crate::contact::ContactPoint;
|
||||
use crate::rigid_body::{RigidBody, PhysicsConfig};
|
||||
use crate::collision::detect_collisions;
|
||||
use crate::integrator::integrate;
|
||||
use crate::integrator::{integrate, inertia_tensor, inv_inertia};
|
||||
use crate::ccd;
|
||||
|
||||
const POSITION_SLOP: f32 = 0.01;
|
||||
const POSITION_PERCENT: f32 = 0.4;
|
||||
|
||||
pub fn resolve_collisions(world: &mut World, contacts: &[ContactPoint]) {
|
||||
let mut velocity_changes: Vec<(Entity, Vec3)> = Vec::new();
|
||||
let mut position_changes: Vec<(Entity, Vec3)> = Vec::new();
|
||||
pub fn resolve_collisions(world: &mut World, contacts: &[ContactPoint], iterations: u32) {
|
||||
// Wake sleeping bodies that are in contact
|
||||
wake_colliding_bodies(world, contacts);
|
||||
|
||||
for contact in contacts {
|
||||
let rb_a = world.get::<RigidBody>(contact.entity_a).copied();
|
||||
let rb_b = world.get::<RigidBody>(contact.entity_b).copied();
|
||||
for _iter in 0..iterations {
|
||||
let mut velocity_changes: Vec<(Entity, Vec3, Vec3)> = Vec::new(); // (entity, dv_linear, dv_angular)
|
||||
let mut position_changes: Vec<(Entity, Vec3)> = Vec::new();
|
||||
|
||||
let (rb_a, rb_b) = match (rb_a, rb_b) {
|
||||
(Some(a), Some(b)) => (a, b),
|
||||
_ => continue,
|
||||
};
|
||||
for contact in contacts {
|
||||
let rb_a = world.get::<RigidBody>(contact.entity_a).copied();
|
||||
let rb_b = world.get::<RigidBody>(contact.entity_b).copied();
|
||||
let col_a = world.get::<Collider>(contact.entity_a).copied();
|
||||
let col_b = world.get::<Collider>(contact.entity_b).copied();
|
||||
let pos_a = world.get::<Transform>(contact.entity_a).map(|t| t.position);
|
||||
let pos_b = world.get::<Transform>(contact.entity_b).map(|t| t.position);
|
||||
|
||||
let inv_mass_a = rb_a.inv_mass();
|
||||
let inv_mass_b = rb_b.inv_mass();
|
||||
let inv_mass_sum = inv_mass_a + inv_mass_b;
|
||||
|
||||
if inv_mass_sum == 0.0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let v_rel = rb_a.velocity - rb_b.velocity;
|
||||
let v_rel_n = v_rel.dot(contact.normal);
|
||||
|
||||
// normal points A→B; v_rel_n > 0 means A approaches B → apply impulse
|
||||
let j = if v_rel_n > 0.0 {
|
||||
let e = rb_a.restitution.min(rb_b.restitution);
|
||||
let j = (1.0 + e) * v_rel_n / inv_mass_sum;
|
||||
|
||||
velocity_changes.push((contact.entity_a, contact.normal * (-j * inv_mass_a)));
|
||||
velocity_changes.push((contact.entity_b, contact.normal * (j * inv_mass_b)));
|
||||
|
||||
j
|
||||
} else {
|
||||
// No separating impulse needed, but use contact depth to derive a
|
||||
// representative normal force magnitude for friction clamping.
|
||||
// A simple proxy: treat the penetration as providing a static normal force.
|
||||
contact.depth / inv_mass_sum
|
||||
};
|
||||
|
||||
// Coulomb friction: tangential impulse clamped to mu * normal impulse
|
||||
let v_rel_n_scalar = v_rel.dot(contact.normal);
|
||||
let v_rel_tangent = v_rel - contact.normal * v_rel_n_scalar;
|
||||
let tangent_len = v_rel_tangent.length();
|
||||
|
||||
if tangent_len > 1e-6 {
|
||||
let tangent = v_rel_tangent * (1.0 / tangent_len);
|
||||
|
||||
// Friction coefficient: average of both bodies
|
||||
let mu = (rb_a.friction + rb_b.friction) * 0.5;
|
||||
|
||||
// Coulomb's law: friction impulse <= mu * normal impulse
|
||||
let jt = -v_rel_tangent.dot(tangent) / inv_mass_sum;
|
||||
let friction_j = if jt.abs() <= j * mu {
|
||||
jt // static friction
|
||||
} else {
|
||||
j * mu * jt.signum() // dynamic friction (sliding), clamped magnitude
|
||||
let (rb_a, rb_b) = match (rb_a, rb_b) {
|
||||
(Some(a), Some(b)) => (a, b),
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
velocity_changes.push((contact.entity_a, tangent * (friction_j * inv_mass_a)));
|
||||
velocity_changes.push((contact.entity_b, tangent * (-friction_j * inv_mass_b)));
|
||||
let inv_mass_a = rb_a.inv_mass();
|
||||
let inv_mass_b = rb_b.inv_mass();
|
||||
let inv_mass_sum = inv_mass_a + inv_mass_b;
|
||||
|
||||
if inv_mass_sum == 0.0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Compute lever arms for angular impulse
|
||||
let center_a = pos_a.unwrap_or(Vec3::ZERO);
|
||||
let center_b = pos_b.unwrap_or(Vec3::ZERO);
|
||||
let r_a = contact.point_on_a - center_a;
|
||||
let r_b = contact.point_on_b - center_b;
|
||||
|
||||
// Compute inverse inertia
|
||||
let inv_i_a = col_a.map(|c| inv_inertia(inertia_tensor(&c, rb_a.mass)))
|
||||
.unwrap_or(Vec3::ZERO);
|
||||
let inv_i_b = col_b.map(|c| inv_inertia(inertia_tensor(&c, rb_b.mass)))
|
||||
.unwrap_or(Vec3::ZERO);
|
||||
|
||||
// Relative velocity at contact point (including angular contribution)
|
||||
let v_a = rb_a.velocity + rb_a.angular_velocity.cross(r_a);
|
||||
let v_b = rb_b.velocity + rb_b.angular_velocity.cross(r_b);
|
||||
let v_rel = v_a - v_b;
|
||||
let v_rel_n = v_rel.dot(contact.normal);
|
||||
|
||||
// Effective mass including rotational terms
|
||||
let r_a_cross_n = r_a.cross(contact.normal);
|
||||
let r_b_cross_n = r_b.cross(contact.normal);
|
||||
let angular_term_a = Vec3::new(
|
||||
r_a_cross_n.x * inv_i_a.x,
|
||||
r_a_cross_n.y * inv_i_a.y,
|
||||
r_a_cross_n.z * inv_i_a.z,
|
||||
).cross(r_a).dot(contact.normal);
|
||||
let angular_term_b = Vec3::new(
|
||||
r_b_cross_n.x * inv_i_b.x,
|
||||
r_b_cross_n.y * inv_i_b.y,
|
||||
r_b_cross_n.z * inv_i_b.z,
|
||||
).cross(r_b).dot(contact.normal);
|
||||
|
||||
let effective_mass = inv_mass_sum + angular_term_a + angular_term_b;
|
||||
|
||||
// normal points A→B; v_rel_n > 0 means A approaches B → apply impulse
|
||||
let j = if v_rel_n > 0.0 {
|
||||
let e = rb_a.restitution.min(rb_b.restitution);
|
||||
let j = (1.0 + e) * v_rel_n / effective_mass;
|
||||
|
||||
// Linear impulse
|
||||
velocity_changes.push((contact.entity_a, contact.normal * (-j * inv_mass_a), Vec3::ZERO));
|
||||
velocity_changes.push((contact.entity_b, contact.normal * (j * inv_mass_b), Vec3::ZERO));
|
||||
|
||||
// Angular impulse: torque = r × impulse
|
||||
let angular_impulse_a = r_a.cross(contact.normal * (-j));
|
||||
let angular_impulse_b = r_b.cross(contact.normal * j);
|
||||
let dw_a = Vec3::new(
|
||||
angular_impulse_a.x * inv_i_a.x,
|
||||
angular_impulse_a.y * inv_i_a.y,
|
||||
angular_impulse_a.z * inv_i_a.z,
|
||||
);
|
||||
let dw_b = Vec3::new(
|
||||
angular_impulse_b.x * inv_i_b.x,
|
||||
angular_impulse_b.y * inv_i_b.y,
|
||||
angular_impulse_b.z * inv_i_b.z,
|
||||
);
|
||||
velocity_changes.push((contact.entity_a, Vec3::ZERO, dw_a));
|
||||
velocity_changes.push((contact.entity_b, Vec3::ZERO, dw_b));
|
||||
|
||||
j
|
||||
} else {
|
||||
contact.depth / inv_mass_sum
|
||||
};
|
||||
|
||||
// Coulomb friction: tangential impulse clamped to mu * normal impulse
|
||||
let v_rel_tangent = v_rel - contact.normal * v_rel_n;
|
||||
let tangent_len = v_rel_tangent.length();
|
||||
|
||||
if tangent_len > 1e-6 {
|
||||
let tangent = v_rel_tangent * (1.0 / tangent_len);
|
||||
let mu = (rb_a.friction + rb_b.friction) * 0.5;
|
||||
|
||||
let jt = -v_rel_tangent.dot(tangent) / effective_mass;
|
||||
let friction_j = if jt.abs() <= j * mu {
|
||||
jt
|
||||
} else {
|
||||
j * mu * jt.signum()
|
||||
};
|
||||
|
||||
velocity_changes.push((contact.entity_a, tangent * (friction_j * inv_mass_a), Vec3::ZERO));
|
||||
velocity_changes.push((contact.entity_b, tangent * (-friction_j * inv_mass_b), Vec3::ZERO));
|
||||
|
||||
// Angular friction impulse
|
||||
let angular_fric_a = r_a.cross(tangent * friction_j);
|
||||
let angular_fric_b = r_b.cross(tangent * (-friction_j));
|
||||
let dw_fric_a = Vec3::new(
|
||||
angular_fric_a.x * inv_i_a.x,
|
||||
angular_fric_a.y * inv_i_a.y,
|
||||
angular_fric_a.z * inv_i_a.z,
|
||||
);
|
||||
let dw_fric_b = Vec3::new(
|
||||
angular_fric_b.x * inv_i_b.x,
|
||||
angular_fric_b.y * inv_i_b.y,
|
||||
angular_fric_b.z * inv_i_b.z,
|
||||
);
|
||||
velocity_changes.push((contact.entity_a, Vec3::ZERO, dw_fric_a));
|
||||
velocity_changes.push((contact.entity_b, Vec3::ZERO, dw_fric_b));
|
||||
}
|
||||
|
||||
// Position correction only on first iteration
|
||||
if _iter == 0 {
|
||||
let correction_mag = (contact.depth - POSITION_SLOP).max(0.0) * POSITION_PERCENT / inv_mass_sum;
|
||||
if correction_mag > 0.0 {
|
||||
let correction = contact.normal * correction_mag;
|
||||
position_changes.push((contact.entity_a, correction * (-inv_mass_a)));
|
||||
position_changes.push((contact.entity_b, correction * inv_mass_b));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let correction_mag = (contact.depth - POSITION_SLOP).max(0.0) * POSITION_PERCENT / inv_mass_sum;
|
||||
if correction_mag > 0.0 {
|
||||
let correction = contact.normal * correction_mag;
|
||||
position_changes.push((contact.entity_a, correction * (-inv_mass_a)));
|
||||
position_changes.push((contact.entity_b, correction * inv_mass_b));
|
||||
// Apply velocity changes
|
||||
for (entity, dv, dw) in velocity_changes {
|
||||
if let Some(rb) = world.get_mut::<RigidBody>(entity) {
|
||||
rb.velocity = rb.velocity + dv;
|
||||
rb.angular_velocity = rb.angular_velocity + dw;
|
||||
}
|
||||
}
|
||||
|
||||
// Apply position corrections
|
||||
for (entity, dp) in position_changes {
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position = t.position + dp;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (entity, dv) in velocity_changes {
|
||||
fn wake_colliding_bodies(world: &mut World, contacts: &[ContactPoint]) {
|
||||
let wake_list: Vec<Entity> = contacts
|
||||
.iter()
|
||||
.flat_map(|c| {
|
||||
let mut entities = Vec::new();
|
||||
if let Some(rb) = world.get::<RigidBody>(c.entity_a) {
|
||||
if rb.is_sleeping { entities.push(c.entity_a); }
|
||||
}
|
||||
if let Some(rb) = world.get::<RigidBody>(c.entity_b) {
|
||||
if rb.is_sleeping { entities.push(c.entity_b); }
|
||||
}
|
||||
entities
|
||||
})
|
||||
.collect();
|
||||
|
||||
for entity in wake_list {
|
||||
if let Some(rb) = world.get_mut::<RigidBody>(entity) {
|
||||
rb.velocity = rb.velocity + dv;
|
||||
}
|
||||
}
|
||||
|
||||
for (entity, dp) in position_changes {
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position = t.position + dp;
|
||||
rb.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn physics_step(world: &mut World, config: &PhysicsConfig) {
|
||||
// CCD: for fast-moving bodies, check for tunneling
|
||||
apply_ccd(world, config);
|
||||
|
||||
integrate(world, config);
|
||||
let contacts = detect_collisions(world);
|
||||
resolve_collisions(world, &contacts);
|
||||
resolve_collisions(world, &contacts, config.solver_iterations);
|
||||
}
|
||||
|
||||
fn apply_ccd(world: &mut World, config: &PhysicsConfig) {
|
||||
// Gather fast-moving bodies and all collider AABBs
|
||||
let bodies: Vec<(Entity, Vec3, Vec3, Collider)> = world
|
||||
.query3::<Transform, RigidBody, Collider>()
|
||||
.into_iter()
|
||||
.filter(|(_, _, rb, _)| !rb.is_static() && !rb.is_sleeping)
|
||||
.map(|(e, t, rb, c)| (e, t.position, rb.velocity, *c))
|
||||
.collect();
|
||||
|
||||
let all_colliders: Vec<(Entity, voltex_math::AABB)> = world
|
||||
.query2::<Transform, Collider>()
|
||||
.into_iter()
|
||||
.map(|(e, t, c)| (e, c.aabb(t.position)))
|
||||
.collect();
|
||||
|
||||
let mut ccd_corrections: Vec<(Entity, Vec3)> = Vec::new();
|
||||
|
||||
for (entity, pos, vel, collider) in &bodies {
|
||||
let speed = vel.length();
|
||||
let collider_radius = match collider {
|
||||
Collider::Sphere { radius } => *radius,
|
||||
Collider::Box { half_extents } => half_extents.x.min(half_extents.y).min(half_extents.z),
|
||||
Collider::Capsule { radius, .. } => *radius,
|
||||
};
|
||||
|
||||
// Only apply CCD if displacement > collider radius
|
||||
if speed * config.fixed_dt <= collider_radius {
|
||||
continue;
|
||||
}
|
||||
|
||||
let sweep_radius = match collider {
|
||||
Collider::Sphere { radius } => *radius,
|
||||
_ => collider_radius,
|
||||
};
|
||||
|
||||
let end = *pos + *vel * config.fixed_dt;
|
||||
|
||||
let mut earliest_t = 1.0f32;
|
||||
|
||||
for (other_entity, other_aabb) in &all_colliders {
|
||||
if *other_entity == *entity {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(t) = ccd::swept_sphere_vs_aabb(*pos, end, sweep_radius, other_aabb) {
|
||||
if t < earliest_t {
|
||||
earliest_t = t;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if earliest_t < 1.0 {
|
||||
// Place body just before collision point
|
||||
let safe_t = (earliest_t - 0.01).max(0.0);
|
||||
let safe_pos = *pos + *vel * config.fixed_dt * safe_t;
|
||||
ccd_corrections.push((*entity, safe_pos));
|
||||
}
|
||||
}
|
||||
|
||||
for (entity, safe_pos) in ccd_corrections {
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position = safe_pos;
|
||||
}
|
||||
if let Some(rb) = world.get_mut::<RigidBody>(entity) {
|
||||
// Reduce velocity to prevent re-tunneling
|
||||
rb.velocity = rb.velocity * 0.5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -138,7 +307,7 @@ mod tests {
|
||||
let contacts = detect_collisions(&world);
|
||||
assert_eq!(contacts.len(), 1);
|
||||
|
||||
resolve_collisions(&mut world, &contacts);
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
|
||||
let va = world.get::<RigidBody>(a).unwrap().velocity;
|
||||
let vb = world.get::<RigidBody>(b).unwrap().velocity;
|
||||
@@ -168,7 +337,7 @@ mod tests {
|
||||
let contacts = detect_collisions(&world);
|
||||
assert_eq!(contacts.len(), 1);
|
||||
|
||||
resolve_collisions(&mut world, &contacts);
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
|
||||
let ball_rb = world.get::<RigidBody>(ball).unwrap();
|
||||
let floor_rb = world.get::<RigidBody>(floor).unwrap();
|
||||
@@ -198,7 +367,7 @@ mod tests {
|
||||
let contacts = detect_collisions(&world);
|
||||
assert_eq!(contacts.len(), 1);
|
||||
|
||||
resolve_collisions(&mut world, &contacts);
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
|
||||
let pa = world.get::<Transform>(a).unwrap().position;
|
||||
let pb = world.get::<Transform>(b).unwrap().position;
|
||||
@@ -234,14 +403,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_friction_slows_sliding() {
|
||||
// Ball sliding on static floor with friction
|
||||
let mut world = World::new();
|
||||
|
||||
let ball = world.spawn();
|
||||
world.add(ball, Transform::from_position(Vec3::new(0.0, 0.4, 0.0)));
|
||||
world.add(ball, Collider::Sphere { radius: 0.5 });
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.velocity = Vec3::new(5.0, 0.0, 0.0); // sliding horizontally
|
||||
rb.velocity = Vec3::new(5.0, 0.0, 0.0);
|
||||
rb.gravity_scale = 0.0;
|
||||
rb.friction = 0.5;
|
||||
world.add(ball, rb);
|
||||
@@ -253,14 +421,12 @@ mod tests {
|
||||
floor_rb.friction = 0.5;
|
||||
world.add(floor, floor_rb);
|
||||
|
||||
// Ball center at 0.4, radius 0.5, floor top at 0.0 → overlap 0.1
|
||||
let contacts = detect_collisions(&world);
|
||||
if !contacts.is_empty() {
|
||||
resolve_collisions(&mut world, &contacts);
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
}
|
||||
|
||||
let ball_v = world.get::<RigidBody>(ball).unwrap().velocity;
|
||||
// X velocity should be reduced by friction
|
||||
assert!(ball_v.x < 5.0, "friction should slow horizontal velocity: {}", ball_v.x);
|
||||
assert!(ball_v.x > 0.0, "should still be moving: {}", ball_v.x);
|
||||
}
|
||||
@@ -280,11 +446,125 @@ mod tests {
|
||||
world.add(b, RigidBody::statik());
|
||||
|
||||
let contacts = detect_collisions(&world);
|
||||
resolve_collisions(&mut world, &contacts);
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
|
||||
let pa = world.get::<Transform>(a).unwrap().position;
|
||||
let pb = world.get::<Transform>(b).unwrap().position;
|
||||
assert!(approx(pa.x, 0.0));
|
||||
assert!(approx(pb.x, 0.5));
|
||||
}
|
||||
|
||||
// --- Angular impulse tests ---
|
||||
|
||||
#[test]
|
||||
fn test_off_center_hit_produces_spin() {
|
||||
let mut world = World::new();
|
||||
|
||||
// Sphere A moving right, hitting sphere B off-center (offset in Y)
|
||||
let a = world.spawn();
|
||||
world.add(a, Transform::from_position(Vec3::new(-0.5, 0.5, 0.0)));
|
||||
world.add(a, Collider::Sphere { radius: 1.0 });
|
||||
let mut rb_a = RigidBody::dynamic(1.0);
|
||||
rb_a.velocity = Vec3::new(2.0, 0.0, 0.0);
|
||||
rb_a.restitution = 0.5;
|
||||
rb_a.gravity_scale = 0.0;
|
||||
world.add(a, rb_a);
|
||||
|
||||
let b = world.spawn();
|
||||
world.add(b, Transform::from_position(Vec3::new(0.5, -0.5, 0.0)));
|
||||
world.add(b, Collider::Sphere { radius: 1.0 });
|
||||
let mut rb_b = RigidBody::dynamic(1.0);
|
||||
rb_b.gravity_scale = 0.0;
|
||||
rb_b.restitution = 0.5;
|
||||
world.add(b, rb_b);
|
||||
|
||||
let contacts = detect_collisions(&world);
|
||||
assert!(!contacts.is_empty());
|
||||
|
||||
resolve_collisions(&mut world, &contacts, 4);
|
||||
|
||||
let rb_a_after = world.get::<RigidBody>(a).unwrap();
|
||||
let rb_b_after = world.get::<RigidBody>(b).unwrap();
|
||||
|
||||
// At least one body should have non-zero angular velocity after off-center collision
|
||||
let total_angular = rb_a_after.angular_velocity.length() + rb_b_after.angular_velocity.length();
|
||||
assert!(total_angular > 1e-4, "off-center hit should produce angular velocity, got {}", total_angular);
|
||||
}
|
||||
|
||||
// --- Sequential impulse tests ---
|
||||
|
||||
#[test]
|
||||
fn test_sequential_impulse_stability() {
|
||||
// Stack of 3 boxes on floor - with iterations they should be more stable
|
||||
let mut world = World::new();
|
||||
|
||||
let floor = world.spawn();
|
||||
world.add(floor, Transform::from_position(Vec3::new(0.0, -0.5, 0.0)));
|
||||
world.add(floor, Collider::Box { half_extents: Vec3::new(10.0, 0.5, 10.0) });
|
||||
world.add(floor, RigidBody::statik());
|
||||
|
||||
let mut boxes = Vec::new();
|
||||
for i in 0..3 {
|
||||
let e = world.spawn();
|
||||
let y = 0.5 + i as f32 * 1.0;
|
||||
world.add(e, Transform::from_position(Vec3::new(0.0, y, 0.0)));
|
||||
world.add(e, Collider::Box { half_extents: Vec3::new(0.5, 0.5, 0.5) });
|
||||
let mut rb = RigidBody::dynamic(1.0);
|
||||
rb.gravity_scale = 0.0; // no gravity for stability test
|
||||
world.add(e, rb);
|
||||
boxes.push(e);
|
||||
}
|
||||
|
||||
let config = PhysicsConfig {
|
||||
gravity: Vec3::ZERO,
|
||||
fixed_dt: 1.0 / 60.0,
|
||||
solver_iterations: 4,
|
||||
};
|
||||
|
||||
// Run a few steps
|
||||
for _ in 0..5 {
|
||||
physics_step(&mut world, &config);
|
||||
}
|
||||
|
||||
// All boxes should remain roughly in place (no gravity, just resting)
|
||||
for (i, e) in boxes.iter().enumerate() {
|
||||
let t = world.get::<Transform>(*e).unwrap();
|
||||
let expected_y = 0.5 + i as f32 * 1.0;
|
||||
assert!((t.position.y - expected_y).abs() < 1.0,
|
||||
"box {} moved too much: expected y~{}, got {}", i, expected_y, t.position.y);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Wake on collision test ---
|
||||
|
||||
#[test]
|
||||
fn test_wake_on_collision() {
|
||||
let mut world = World::new();
|
||||
|
||||
// Sleeping body
|
||||
let a = world.spawn();
|
||||
world.add(a, Transform::from_position(Vec3::ZERO));
|
||||
world.add(a, Collider::Sphere { radius: 1.0 });
|
||||
let mut rb_a = RigidBody::dynamic(1.0);
|
||||
rb_a.is_sleeping = true;
|
||||
rb_a.gravity_scale = 0.0;
|
||||
world.add(a, rb_a);
|
||||
|
||||
// Moving body that collides with sleeping body
|
||||
let b = world.spawn();
|
||||
world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
|
||||
world.add(b, Collider::Sphere { radius: 1.0 });
|
||||
let mut rb_b = RigidBody::dynamic(1.0);
|
||||
rb_b.velocity = Vec3::new(-2.0, 0.0, 0.0);
|
||||
rb_b.gravity_scale = 0.0;
|
||||
world.add(b, rb_b);
|
||||
|
||||
let contacts = detect_collisions(&world);
|
||||
assert!(!contacts.is_empty());
|
||||
|
||||
resolve_collisions(&mut world, &contacts, 1);
|
||||
|
||||
let rb_a_after = world.get::<RigidBody>(a).unwrap();
|
||||
assert!(!rb_a_after.is_sleeping, "body should wake on collision");
|
||||
}
|
||||
}
|
||||
|
||||
89
crates/voltex_renderer/src/brdf_lut_compute.wgsl
Normal file
89
crates/voltex_renderer/src/brdf_lut_compute.wgsl
Normal file
@@ -0,0 +1,89 @@
|
||||
// GPU Compute shader for BRDF LUT generation (split-sum approximation).
|
||||
// Workgroup size: 16x16, each thread computes one texel.
|
||||
// Output: Rg16Float texture with (scale, bias) per texel.
|
||||
|
||||
@group(0) @binding(0) var output_tex: texture_storage_2d<rg16float, write>;
|
||||
|
||||
const PI: f32 = 3.14159265358979;
|
||||
const NUM_SAMPLES: u32 = 1024u;
|
||||
|
||||
// Van der Corput radical inverse via bit-reversal
|
||||
fn radical_inverse_vdc(bits_in: u32) -> f32 {
|
||||
var bits = bits_in;
|
||||
bits = (bits << 16u) | (bits >> 16u);
|
||||
bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
|
||||
bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
|
||||
bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
|
||||
bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
|
||||
return f32(bits) * 2.3283064365386963e-10; // / 0x100000000
|
||||
}
|
||||
|
||||
// Hammersley low-discrepancy 2D sample
|
||||
fn hammersley(i: u32, n: u32) -> vec2<f32> {
|
||||
return vec2<f32>(f32(i) / f32(n), radical_inverse_vdc(i));
|
||||
}
|
||||
|
||||
// GGX importance-sampled half vector in tangent space (N = (0,0,1))
|
||||
fn importance_sample_ggx(xi: vec2<f32>, roughness: f32) -> vec3<f32> {
|
||||
let a = roughness * roughness;
|
||||
let phi = 2.0 * PI * xi.x;
|
||||
let cos_theta = sqrt((1.0 - xi.y) / (1.0 + (a * a - 1.0) * xi.y));
|
||||
let sin_theta = sqrt(max(1.0 - cos_theta * cos_theta, 0.0));
|
||||
return vec3<f32>(cos(phi) * sin_theta, sin(phi) * sin_theta, cos_theta);
|
||||
}
|
||||
|
||||
// Smith geometry function for IBL: k = a^2/2
|
||||
fn geometry_smith_ibl(n_dot_v: f32, n_dot_l: f32, roughness: f32) -> f32 {
|
||||
let a = roughness * roughness;
|
||||
let k = a / 2.0;
|
||||
let ggx_v = n_dot_v / (n_dot_v * (1.0 - k) + k);
|
||||
let ggx_l = n_dot_l / (n_dot_l * (1.0 - k) + k);
|
||||
return ggx_v * ggx_l;
|
||||
}
|
||||
|
||||
@compute @workgroup_size(16, 16, 1)
|
||||
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
|
||||
let dims = textureDimensions(output_tex);
|
||||
if gid.x >= dims.x || gid.y >= dims.y {
|
||||
return;
|
||||
}
|
||||
|
||||
let size = f32(dims.x);
|
||||
let n_dot_v = (f32(gid.x) + 0.5) / size;
|
||||
let roughness = clamp((f32(gid.y) + 0.5) / size, 0.0, 1.0);
|
||||
|
||||
let n_dot_v_clamped = clamp(n_dot_v, 0.0, 1.0);
|
||||
|
||||
// View vector in tangent space where N = (0,0,1)
|
||||
let v = vec3<f32>(sqrt(max(1.0 - n_dot_v_clamped * n_dot_v_clamped, 0.0)), 0.0, n_dot_v_clamped);
|
||||
|
||||
var scale = 0.0;
|
||||
var bias = 0.0;
|
||||
|
||||
for (var i = 0u; i < NUM_SAMPLES; i++) {
|
||||
let xi = hammersley(i, NUM_SAMPLES);
|
||||
let h = importance_sample_ggx(xi, roughness);
|
||||
|
||||
// dot(V, H)
|
||||
let v_dot_h = max(dot(v, h), 0.0);
|
||||
|
||||
// Reflect V around H to get L
|
||||
let l = 2.0 * v_dot_h * h - v;
|
||||
|
||||
let n_dot_l = max(l.z, 0.0); // L.z in tangent space
|
||||
let n_dot_h = max(h.z, 0.0);
|
||||
|
||||
if n_dot_l > 0.0 {
|
||||
let g = geometry_smith_ibl(n_dot_v_clamped, n_dot_l, roughness);
|
||||
let g_vis = g * v_dot_h / max(n_dot_h * n_dot_v_clamped, 0.001);
|
||||
let fc = pow(1.0 - v_dot_h, 5.0);
|
||||
scale += g_vis * (1.0 - fc);
|
||||
bias += g_vis * fc;
|
||||
}
|
||||
}
|
||||
|
||||
scale /= f32(NUM_SAMPLES);
|
||||
bias /= f32(NUM_SAMPLES);
|
||||
|
||||
textureStore(output_tex, vec2<i32>(i32(gid.x), i32(gid.y)), vec4<f32>(scale, bias, 0.0, 1.0));
|
||||
}
|
||||
246
crates/voltex_renderer/src/csm.rs
Normal file
246
crates/voltex_renderer/src/csm.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
use voltex_math::{Mat4, Vec3, Vec4};
|
||||
|
||||
pub const CSM_CASCADE_COUNT: usize = 2;
|
||||
pub const CSM_MAP_SIZE: u32 = 2048;
|
||||
pub const CSM_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
|
||||
|
||||
/// Cascaded Shadow Map with 2 cascades.
|
||||
pub struct CascadedShadowMap {
|
||||
pub textures: [wgpu::Texture; CSM_CASCADE_COUNT],
|
||||
pub views: [wgpu::TextureView; CSM_CASCADE_COUNT],
|
||||
pub sampler: wgpu::Sampler,
|
||||
}
|
||||
|
||||
impl CascadedShadowMap {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let create_cascade = |label: &str| {
|
||||
let texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some(label),
|
||||
size: wgpu::Extent3d {
|
||||
width: CSM_MAP_SIZE,
|
||||
height: CSM_MAP_SIZE,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: CSM_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
|
||||
view_formats: &[],
|
||||
});
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
(texture, view)
|
||||
};
|
||||
|
||||
let (t0, v0) = create_cascade("CSM Cascade 0");
|
||||
let (t1, v1) = create_cascade("CSM Cascade 1");
|
||||
|
||||
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
|
||||
label: Some("CSM Sampler"),
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
mipmap_filter: wgpu::MipmapFilterMode::Nearest,
|
||||
compare: Some(wgpu::CompareFunction::LessEqual),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
Self {
|
||||
textures: [t0, t1],
|
||||
views: [v0, v1],
|
||||
sampler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// CSM uniform data: 2 light-view-proj matrices, cascade split distance, shadow params.
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
|
||||
pub struct CsmUniform {
|
||||
pub light_view_proj: [[[f32; 4]; 4]; CSM_CASCADE_COUNT], // 128 bytes
|
||||
pub cascade_split: f32, // view-space depth where cascade 0 ends and cascade 1 begins
|
||||
pub shadow_map_size: f32,
|
||||
pub shadow_bias: f32,
|
||||
pub _padding: f32, // 16 bytes total for last row
|
||||
}
|
||||
|
||||
/// Compute the 8 corners of a sub-frustum in world space, given the camera's
|
||||
/// inverse view-projection and sub-frustum near/far in NDC z [0..1] range.
|
||||
fn frustum_corners_world(inv_vp: &Mat4, z_near_ndc: f32, z_far_ndc: f32) -> [Vec3; 8] {
|
||||
let ndc_corners = [
|
||||
// Near plane corners
|
||||
Vec4::new(-1.0, -1.0, z_near_ndc, 1.0),
|
||||
Vec4::new( 1.0, -1.0, z_near_ndc, 1.0),
|
||||
Vec4::new( 1.0, 1.0, z_near_ndc, 1.0),
|
||||
Vec4::new(-1.0, 1.0, z_near_ndc, 1.0),
|
||||
// Far plane corners
|
||||
Vec4::new(-1.0, -1.0, z_far_ndc, 1.0),
|
||||
Vec4::new( 1.0, -1.0, z_far_ndc, 1.0),
|
||||
Vec4::new( 1.0, 1.0, z_far_ndc, 1.0),
|
||||
Vec4::new(-1.0, 1.0, z_far_ndc, 1.0),
|
||||
];
|
||||
|
||||
let mut world_corners = [Vec3::ZERO; 8];
|
||||
for (i, ndc) in ndc_corners.iter().enumerate() {
|
||||
let w = inv_vp.mul_vec4(*ndc);
|
||||
world_corners[i] = Vec3::new(w.x / w.w, w.y / w.w, w.z / w.w);
|
||||
}
|
||||
world_corners
|
||||
}
|
||||
|
||||
/// Compute a tight orthographic light-view-projection matrix for a set of frustum corners.
|
||||
fn light_matrix_for_corners(light_dir: Vec3, corners: &[Vec3; 8]) -> Mat4 {
|
||||
// Build a light-space view matrix looking in the light direction.
|
||||
let center = {
|
||||
let mut c = Vec3::ZERO;
|
||||
for corner in corners {
|
||||
c = c + *corner;
|
||||
}
|
||||
c * (1.0 / 8.0)
|
||||
};
|
||||
|
||||
// Pick a stable up vector that isn't parallel to light_dir.
|
||||
let up = if light_dir.cross(Vec3::Y).length_squared() < 1e-6 {
|
||||
Vec3::Z
|
||||
} else {
|
||||
Vec3::Y
|
||||
};
|
||||
|
||||
let light_view = Mat4::look_at(
|
||||
center - light_dir * 0.5, // eye slightly behind center along light direction
|
||||
center,
|
||||
up,
|
||||
);
|
||||
|
||||
// Transform all corners into light view space and find AABB.
|
||||
let mut min_x = f32::MAX;
|
||||
let mut max_x = f32::MIN;
|
||||
let mut min_y = f32::MAX;
|
||||
let mut max_y = f32::MIN;
|
||||
let mut min_z = f32::MAX;
|
||||
let mut max_z = f32::MIN;
|
||||
|
||||
for corner in corners {
|
||||
let v = light_view.mul_vec4(Vec4::from_vec3(*corner, 1.0));
|
||||
let p = Vec3::new(v.x, v.y, v.z);
|
||||
min_x = min_x.min(p.x);
|
||||
max_x = max_x.max(p.x);
|
||||
min_y = min_y.min(p.y);
|
||||
max_y = max_y.max(p.y);
|
||||
min_z = min_z.min(p.z);
|
||||
max_z = max_z.max(p.z);
|
||||
}
|
||||
|
||||
// Extend the z range to catch shadow casters behind the frustum.
|
||||
let z_margin = (max_z - min_z) * 2.0;
|
||||
min_z -= z_margin;
|
||||
|
||||
let light_proj = Mat4::orthographic(min_x, max_x, min_y, max_y, -max_z, -min_z);
|
||||
|
||||
light_proj.mul_mat4(&light_view)
|
||||
}
|
||||
|
||||
/// Compute cascade light-view-projection matrices for 2 cascades.
///
/// - `light_dir`: normalized direction **toward** the light source (opposite of light travel).
///   Internally we negate it to get the light travel direction.
/// - `camera_view`, `camera_proj`: the camera's view and projection matrices.
/// - `near`, `far`: camera near/far planes.
/// - `split`: the view-space depth where cascade 0 ends and cascade 1 begins.
///
/// Returns two light-view-projection matrices, one for each cascade.
pub fn compute_cascade_matrices(
    light_dir: Vec3,
    camera_view: &Mat4,
    camera_proj: &Mat4,
    near: f32,
    far: f32,
    split: f32,
) -> [Mat4; CSM_CASCADE_COUNT] {
    // Inverse view-projection maps NDC cube corners back to world space.
    let vp = camera_proj.mul_mat4(camera_view);
    let inv_vp = vp.inverse().expect("Camera VP matrix must be invertible");

    // Map view-space depth to NDC z. For wgpu perspective:
    // ndc_z = (far * (z_view + near)) / (z_view * (far - near))
    // But since z_view is negative in RH, and we want the NDC value, we use
    // the projection matrix directly by projecting (0, 0, -depth, 1).
    let depth_to_ndc = |depth: f32| -> f32 {
        let clip = camera_proj.mul_vec4(Vec4::new(0.0, 0.0, -depth, 1.0));
        clip.z / clip.w
    };

    let ndc_near = depth_to_ndc(near);
    let ndc_split = depth_to_ndc(split);
    let ndc_far = depth_to_ndc(far);

    // Light direction is the direction light travels (away from the source).
    let dir = (-light_dir).normalize();

    // Slice the camera frustum at the split depth; each slice gets its own
    // tightly-fitted light matrix.
    let corners0 = frustum_corners_world(&inv_vp, ndc_near, ndc_split);
    let corners1 = frustum_corners_world(&inv_vp, ndc_split, ndc_far);

    [
        light_matrix_for_corners(dir, &corners0),
        light_matrix_for_corners(dir, &corners1),
    ]
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem;

    #[test]
    fn test_csm_uniform_size() {
        // Must be multiple of 16 for WGSL uniform alignment
        assert_eq!(mem::size_of::<CsmUniform>() % 16, 0,
            "CsmUniform must be 16-byte aligned, got {} bytes", mem::size_of::<CsmUniform>());
    }

    #[test]
    fn test_compute_cascade_matrices_produces_valid_matrices() {
        // Typical scene setup: tilted sun, elevated camera looking at origin.
        let light_dir = Vec3::new(0.0, -1.0, -1.0).normalize();
        let view = Mat4::look_at(Vec3::new(0.0, 5.0, 10.0), Vec3::ZERO, Vec3::Y);
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_4, 16.0 / 9.0, 0.1, 100.0);

        let matrices = compute_cascade_matrices(light_dir, &view, &proj, 0.1, 100.0, 20.0);

        // Both matrices should not be identity (they should be actual projections)
        assert_ne!(matrices[0].cols, Mat4::IDENTITY.cols, "Cascade 0 should not be identity");
        assert_ne!(matrices[1].cols, Mat4::IDENTITY.cols, "Cascade 1 should not be identity");
    }

    #[test]
    fn test_cascade_split_distance() {
        // The split distance should partition the frustum
        let light_dir = Vec3::new(0.0, -1.0, 0.0).normalize();
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 1.0, 50.0);

        let split = 15.0;
        let matrices = compute_cascade_matrices(light_dir, &view, &proj, 1.0, 50.0, split);

        // Both matrices should be different (covering different frustum regions);
        // element-wise comparison with a loose tolerance.
        let differ = matrices[0].cols.iter()
            .zip(matrices[1].cols.iter())
            .any(|(a, b)| {
                a.iter().zip(b.iter()).any(|(x, y)| (x - y).abs() > 1e-3)
            });
        assert!(differ, "Cascade matrices should differ for different frustum regions");
    }

    #[test]
    fn test_frustum_corners_world_identity() {
        // With identity inverse VP, corners should be at NDC positions.
        let inv_vp = Mat4::IDENTITY;
        let corners = frustum_corners_world(&inv_vp, 0.0, 1.0);

        // Near plane at z=0 (corner index 0 is on the near slice)
        assert!((corners[0].z - 0.0).abs() < 1e-5);
        // Far plane at z=1 (corner index 4 is on the far slice)
        assert!((corners[4].z - 1.0).abs() < 1e-5);
    }
}
|
||||
@@ -20,6 +20,10 @@ struct MaterialUniform {
|
||||
@group(1) @binding(1) var s_albedo: sampler;
|
||||
@group(1) @binding(2) var t_normal: texture_2d<f32>;
|
||||
@group(1) @binding(3) var s_normal: sampler;
|
||||
@group(1) @binding(4) var t_orm: texture_2d<f32>;
|
||||
@group(1) @binding(5) var s_orm: sampler;
|
||||
@group(1) @binding(6) var t_emissive: texture_2d<f32>;
|
||||
@group(1) @binding(7) var s_emissive: sampler;
|
||||
|
||||
@group(2) @binding(0) var<uniform> material: MaterialUniform;
|
||||
|
||||
@@ -84,11 +88,21 @@ fn fs_main(in: VertexOutput) -> GBufferOutput {
|
||||
let TBN = mat3x3<f32>(T, B, N_geom);
|
||||
let N = normalize(TBN * tangent_normal);
|
||||
|
||||
// Sample ORM texture: R=AO, G=Roughness, B=Metallic; multiply with material params
|
||||
let orm_sample = textureSample(t_orm, s_orm, in.uv);
|
||||
let ao = orm_sample.r * material.ao;
|
||||
let roughness = orm_sample.g * material.roughness;
|
||||
let metallic = orm_sample.b * material.metallic;
|
||||
|
||||
// Sample emissive texture and compute luminance
|
||||
let emissive = textureSample(t_emissive, s_emissive, in.uv).rgb;
|
||||
let emissive_lum = dot(emissive, vec3<f32>(0.299, 0.587, 0.114));
|
||||
|
||||
var out: GBufferOutput;
|
||||
out.position = vec4<f32>(in.world_pos, 1.0);
|
||||
out.normal = vec4<f32>(N * 0.5 + 0.5, 1.0);
|
||||
out.albedo = vec4<f32>(albedo, material.base_color.a * tex_color.a);
|
||||
out.material_data = vec4<f32>(material.metallic, material.roughness, material.ao, 1.0);
|
||||
out.material_data = vec4<f32>(metallic, roughness, ao, emissive_lum);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
@@ -42,6 +42,10 @@ struct ShadowUniform {
|
||||
light_view_proj: mat4x4<f32>,
|
||||
shadow_map_size: f32,
|
||||
shadow_bias: f32,
|
||||
_padding: vec2<f32>,
|
||||
sun_direction: vec3<f32>,
|
||||
turbidity: f32,
|
||||
sh_coefficients: array<vec4<f32>, 7>,
|
||||
};
|
||||
|
||||
@group(2) @binding(0) var t_shadow: texture_depth_2d;
|
||||
@@ -260,6 +264,7 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
let metallic = mat_sample.r;
|
||||
let roughness = mat_sample.g;
|
||||
let ao = mat_sample.b;
|
||||
let emissive_lum = mat_sample.w;
|
||||
|
||||
let V = normalize(camera_uniform.camera_pos - world_pos);
|
||||
|
||||
@@ -306,7 +311,10 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
let ambient = (diffuse_ibl + specular_ibl) * ao * ssgi_ao + ssgi_indirect;
|
||||
|
||||
// Output raw HDR linear colour; tonemap is applied in a separate tonemap pass.
|
||||
let color = ambient + Lo;
|
||||
var color = ambient + Lo;
|
||||
|
||||
// Add emissive contribution (luminance stored in G-Buffer, modulated by albedo)
|
||||
color += albedo * emissive_lum;
|
||||
|
||||
return vec4<f32>(color, alpha);
|
||||
}
|
||||
|
||||
262
crates/voltex_renderer/src/frustum.rs
Normal file
262
crates/voltex_renderer/src/frustum.rs
Normal file
@@ -0,0 +1,262 @@
|
||||
use voltex_math::Vec3;
|
||||
use crate::light::{LightsUniform, LIGHT_DIRECTIONAL, LIGHT_POINT};
|
||||
|
||||
/// A plane in 3D space: normal.dot(point) + d = 0
#[derive(Debug, Clone, Copy)]
pub struct Plane {
    pub normal: Vec3,
    pub d: f32,
}

impl Plane {
    /// Normalize the plane equation so that |normal| == 1.
    ///
    /// Scaling both `normal` and `d` by 1/|normal| leaves the plane's point
    /// set unchanged but makes `distance` return true Euclidean distance.
    pub fn normalize(&self) -> Self {
        let len = self.normal.length();
        // Degenerate plane (near-zero normal): return it unchanged rather
        // than divide by ~0.
        if len < 1e-10 {
            return *self;
        }
        Self {
            normal: Vec3::new(self.normal.x / len, self.normal.y / len, self.normal.z / len),
            d: self.d / len,
        }
    }

    /// Signed distance from a point to the plane (positive = inside / front).
    pub fn distance(&self, point: Vec3) -> f32 {
        self.normal.dot(point) + self.d
    }
}
|
||||
|
||||
/// Six-plane frustum (left, right, bottom, top, near, far).
#[derive(Debug, Clone, Copy)]
pub struct Frustum {
    // Plane order matches extract_frustum: left, right, bottom, top, near, far.
    pub planes: [Plane; 6],
}
|
||||
|
||||
/// Extract 6 frustum planes from a view-projection matrix using the
|
||||
/// Gribb-Hartmann method.
|
||||
///
|
||||
/// The planes point inward so that a point is inside if distance >= 0 for all planes.
|
||||
/// Matrix is column-major `[[f32;4];4]` (same as `Mat4::cols`).
|
||||
pub fn extract_frustum(view_proj: &voltex_math::Mat4) -> Frustum {
|
||||
// We work with rows of the VP matrix.
|
||||
// For column-major storage cols[c][r]:
|
||||
// row[r] = (cols[0][r], cols[1][r], cols[2][r], cols[3][r])
|
||||
let m = &view_proj.cols;
|
||||
|
||||
let row = |r: usize| -> [f32; 4] {
|
||||
[m[0][r], m[1][r], m[2][r], m[3][r]]
|
||||
};
|
||||
|
||||
let r0 = row(0);
|
||||
let r1 = row(1);
|
||||
let r2 = row(2);
|
||||
let r3 = row(3);
|
||||
|
||||
// Left: row3 + row0
|
||||
let left = Plane {
|
||||
normal: Vec3::new(r3[0] + r0[0], r3[1] + r0[1], r3[2] + r0[2]),
|
||||
d: r3[3] + r0[3],
|
||||
}.normalize();
|
||||
|
||||
// Right: row3 - row0
|
||||
let right = Plane {
|
||||
normal: Vec3::new(r3[0] - r0[0], r3[1] - r0[1], r3[2] - r0[2]),
|
||||
d: r3[3] - r0[3],
|
||||
}.normalize();
|
||||
|
||||
// Bottom: row3 + row1
|
||||
let bottom = Plane {
|
||||
normal: Vec3::new(r3[0] + r1[0], r3[1] + r1[1], r3[2] + r1[2]),
|
||||
d: r3[3] + r1[3],
|
||||
}.normalize();
|
||||
|
||||
// Top: row3 - row1
|
||||
let top = Plane {
|
||||
normal: Vec3::new(r3[0] - r1[0], r3[1] - r1[1], r3[2] - r1[2]),
|
||||
d: r3[3] - r1[3],
|
||||
}.normalize();
|
||||
|
||||
// Near: row2 (wgpu NDC z in [0,1], so near = row2 directly)
|
||||
let near = Plane {
|
||||
normal: Vec3::new(r2[0], r2[1], r2[2]),
|
||||
d: r2[3],
|
||||
}.normalize();
|
||||
|
||||
// Far: row3 - row2
|
||||
let far = Plane {
|
||||
normal: Vec3::new(r3[0] - r2[0], r3[1] - r2[1], r3[2] - r2[2]),
|
||||
d: r3[3] - r2[3],
|
||||
}.normalize();
|
||||
|
||||
Frustum {
|
||||
planes: [left, right, bottom, top, near, far],
|
||||
}
|
||||
}
|
||||
|
||||
/// Test whether a sphere (center, radius) is at least partially inside the frustum.
|
||||
pub fn sphere_vs_frustum(center: Vec3, radius: f32, frustum: &Frustum) -> bool {
|
||||
for plane in &frustum.planes {
|
||||
if plane.distance(center) < -radius {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Return indices of lights from `lights` that are visible in the given frustum.
|
||||
///
|
||||
/// - Directional lights are always included.
|
||||
/// - Point lights use a bounding sphere (position, range).
|
||||
/// - Spot lights use a conservative bounding sphere centered at the light position
|
||||
/// with radius equal to the light range.
|
||||
pub fn cull_lights(frustum: &Frustum, lights: &LightsUniform) -> Vec<usize> {
|
||||
let count = lights.count as usize;
|
||||
let mut visible = Vec::with_capacity(count);
|
||||
|
||||
for i in 0..count {
|
||||
let light = &lights.lights[i];
|
||||
if light.light_type == LIGHT_DIRECTIONAL {
|
||||
// Directional lights affect everything
|
||||
visible.push(i);
|
||||
} else if light.light_type == LIGHT_POINT {
|
||||
let center = Vec3::new(light.position[0], light.position[1], light.position[2]);
|
||||
if sphere_vs_frustum(center, light.range, frustum) {
|
||||
visible.push(i);
|
||||
}
|
||||
} else {
|
||||
// Spot light — use bounding sphere at position with radius = range
|
||||
let center = Vec3::new(light.position[0], light.position[1], light.position[2]);
|
||||
if sphere_vs_frustum(center, light.range, frustum) {
|
||||
visible.push(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
visible
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_math::Mat4;
    use crate::light::{LightData, LightsUniform};

    // Absolute-tolerance float comparison used for plane-normal checks.
    fn approx_eq(a: f32, b: f32, eps: f32) -> bool {
        (a - b).abs() < eps
    }

    #[test]
    fn test_frustum_extraction_identity() {
        // Identity VP means clip space = NDC directly.
        // For wgpu: x,y in [-1,1], z in [0,1].
        let frustum = extract_frustum(&Mat4::IDENTITY);

        // All 6 planes should be normalized (length ~1)
        for (i, plane) in frustum.planes.iter().enumerate() {
            let len = plane.normal.length();
            assert!(approx_eq(len, 1.0, 1e-4), "Plane {} normal length = {}", i, len);
        }
    }

    #[test]
    fn test_frustum_extraction_perspective() {
        let proj = Mat4::perspective(
            std::f32::consts::FRAC_PI_2, // 90 deg
            1.0,
            0.1,
            100.0,
        );
        let view = Mat4::look_at(
            Vec3::new(0.0, 0.0, 5.0),
            Vec3::ZERO,
            Vec3::Y,
        );
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        // Origin (0,0,0) should be inside the frustum (it's 5 units in front of camera)
        assert!(sphere_vs_frustum(Vec3::ZERO, 0.0, &frustum),
            "Origin should be inside frustum");
    }

    #[test]
    fn test_sphere_inside_frustum() {
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 0.1, 100.0);
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        // Sphere at origin with radius 1 — well inside
        assert!(sphere_vs_frustum(Vec3::ZERO, 1.0, &frustum));
    }

    #[test]
    fn test_sphere_outside_frustum() {
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 0.1, 100.0);
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        // Sphere far behind the camera (camera at z=5 looking toward -z)
        assert!(!sphere_vs_frustum(Vec3::new(0.0, 0.0, 200.0), 1.0, &frustum),
            "Sphere far behind camera should be outside");

        // Sphere far to the side
        assert!(!sphere_vs_frustum(Vec3::new(500.0, 0.0, 0.0), 1.0, &frustum),
            "Sphere far to the side should be outside");
    }

    #[test]
    fn test_sphere_partially_inside() {
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 0.1, 100.0);
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        // Sphere at far plane boundary but with large radius should be inside
        assert!(sphere_vs_frustum(Vec3::new(0.0, 0.0, -96.0), 5.0, &frustum));
    }

    #[test]
    fn test_cull_lights_directional_always_included() {
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 0.1, 50.0);
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        let mut lights = LightsUniform::new();
        lights.add_light(LightData::directional([0.0, -1.0, 0.0], [1.0, 1.0, 1.0], 1.0));
        lights.add_light(LightData::point([0.0, 0.0, 0.0], [1.0, 0.0, 0.0], 2.0, 5.0));
        // Point light far away — should be culled
        lights.add_light(LightData::point([500.0, 500.0, 500.0], [0.0, 1.0, 0.0], 2.0, 1.0));

        let visible = cull_lights(&frustum, &lights);
        // Directional (0) always included, point at origin (1) inside, far point (2) culled
        assert!(visible.contains(&0), "Directional light must always be included");
        assert!(visible.contains(&1), "Point light at origin should be visible");
        assert!(!visible.contains(&2), "Far point light should be culled");
    }

    #[test]
    fn test_cull_lights_spot() {
        let proj = Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, 0.1, 50.0);
        let view = Mat4::look_at(Vec3::new(0.0, 0.0, 5.0), Vec3::ZERO, Vec3::Y);
        let vp = proj.mul_mat4(&view);
        let frustum = extract_frustum(&vp);

        let mut lights = LightsUniform::new();
        // Spot light inside frustum
        lights.add_light(LightData::spot(
            [0.0, 2.0, 0.0], [0.0, -1.0, 0.0], [1.0, 1.0, 1.0], 3.0, 10.0, 15.0, 30.0,
        ));
        // Spot light far away
        lights.add_light(LightData::spot(
            [300.0, 300.0, 300.0], [0.0, -1.0, 0.0], [1.0, 1.0, 1.0], 3.0, 5.0, 15.0, 30.0,
        ));

        let visible = cull_lights(&frustum, &lights);
        assert!(visible.contains(&0), "Near spot should be visible");
        assert!(!visible.contains(&1), "Far spot should be culled");
    }
}
|
||||
580
crates/voltex_renderer/src/gltf.rs
Normal file
580
crates/voltex_renderer/src/gltf.rs
Normal file
@@ -0,0 +1,580 @@
|
||||
use crate::json_parser::{self, JsonValue};
|
||||
use crate::vertex::MeshVertex;
|
||||
use crate::obj::compute_tangents;
|
||||
|
||||
/// Parsed contents of a glTF/GLB asset: one entry per mesh primitive.
pub struct GltfData {
    pub meshes: Vec<GltfMesh>,
}

/// A single renderable primitive: vertex data plus triangle indices.
pub struct GltfMesh {
    pub vertices: Vec<MeshVertex>,
    pub indices: Vec<u32>,
    // Name of the owning glTF mesh, when the file provides one.
    pub name: Option<String>,
    // PBR factors of the primitive's assigned material, when present.
    pub material: Option<GltfMaterial>,
}

/// pbrMetallicRoughness scalar factors (only factors — no texture references).
pub struct GltfMaterial {
    pub base_color: [f32; 4],
    pub metallic: f32,
    pub roughness: f32,
}

// GLB container constants from the glTF 2.0 spec (little-endian tags).
const GLB_MAGIC: u32 = 0x46546C67; // ASCII "glTF"
const GLB_VERSION: u32 = 2;
const CHUNK_JSON: u32 = 0x4E4F534A; // ASCII "JSON"
const CHUNK_BIN: u32 = 0x004E4942; // ASCII "BIN\0"
|
||||
|
||||
pub fn parse_gltf(data: &[u8]) -> Result<GltfData, String> {
|
||||
if data.len() < 4 {
|
||||
return Err("Data too short".into());
|
||||
}
|
||||
|
||||
// Detect format: GLB (binary) or JSON
|
||||
let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
|
||||
if magic == GLB_MAGIC {
|
||||
parse_glb(data)
|
||||
} else if data[0] == b'{' {
|
||||
parse_gltf_json(data)
|
||||
} else {
|
||||
Err("Unknown glTF format: not GLB or JSON".into())
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_glb(data: &[u8]) -> Result<GltfData, String> {
|
||||
if data.len() < 12 {
|
||||
return Err("GLB header too short".into());
|
||||
}
|
||||
let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
|
||||
if version != GLB_VERSION {
|
||||
return Err(format!("Unsupported GLB version: {} (expected 2)", version));
|
||||
}
|
||||
let _total_len = u32::from_le_bytes([data[8], data[9], data[10], data[11]]) as usize;
|
||||
|
||||
// Parse chunks
|
||||
let mut pos = 12;
|
||||
let mut json_str = String::new();
|
||||
let mut bin_data: Vec<u8> = Vec::new();
|
||||
|
||||
while pos + 8 <= data.len() {
|
||||
let chunk_len = u32::from_le_bytes([data[pos], data[pos+1], data[pos+2], data[pos+3]]) as usize;
|
||||
let chunk_type = u32::from_le_bytes([data[pos+4], data[pos+5], data[pos+6], data[pos+7]]);
|
||||
pos += 8;
|
||||
|
||||
if pos + chunk_len > data.len() {
|
||||
return Err("Chunk extends past data".into());
|
||||
}
|
||||
|
||||
match chunk_type {
|
||||
CHUNK_JSON => {
|
||||
json_str = std::str::from_utf8(&data[pos..pos + chunk_len])
|
||||
.map_err(|_| "Invalid UTF-8 in JSON chunk".to_string())?
|
||||
.to_string();
|
||||
}
|
||||
CHUNK_BIN => {
|
||||
bin_data = data[pos..pos + chunk_len].to_vec();
|
||||
}
|
||||
_ => {} // skip unknown chunks
|
||||
}
|
||||
pos += chunk_len;
|
||||
// Chunks are 4-byte aligned
|
||||
pos = (pos + 3) & !3;
|
||||
}
|
||||
|
||||
if json_str.is_empty() {
|
||||
return Err("No JSON chunk found in GLB".into());
|
||||
}
|
||||
|
||||
let json = json_parser::parse_json(&json_str)?;
|
||||
let buffers = vec![bin_data]; // GLB has one implicit binary buffer
|
||||
extract_meshes(&json, &buffers)
|
||||
}
|
||||
|
||||
fn parse_gltf_json(data: &[u8]) -> Result<GltfData, String> {
|
||||
let json_str = std::str::from_utf8(data).map_err(|_| "Invalid UTF-8".to_string())?;
|
||||
let json = json_parser::parse_json(json_str)?;
|
||||
|
||||
// Resolve buffers (embedded base64 URIs)
|
||||
let mut buffers = Vec::new();
|
||||
if let Some(bufs) = json.get("buffers").and_then(|v| v.as_array()) {
|
||||
for buf in bufs {
|
||||
if let Some(uri) = buf.get("uri").and_then(|v| v.as_str()) {
|
||||
if let Some(b64) = uri.strip_prefix("data:application/octet-stream;base64,") {
|
||||
buffers.push(decode_base64(b64)?);
|
||||
} else if let Some(b64) = uri.strip_prefix("data:application/gltf-buffer;base64,") {
|
||||
buffers.push(decode_base64(b64)?);
|
||||
} else {
|
||||
return Err(format!("External buffer URIs not supported: {}", uri));
|
||||
}
|
||||
} else {
|
||||
buffers.push(Vec::new());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
extract_meshes(&json, &buffers)
|
||||
}
|
||||
|
||||
/// Decode a standard base64 string (RFC 4648 alphabet; `=` padding optional).
///
/// Whitespace (space, tab, CR, LF) is ignored so wrapped base64 decodes too.
///
/// # Errors
/// Returns an error for any character outside the base64 alphabet, and for a
/// dangling single trailing character (a lone base64 digit carries only 6
/// bits and cannot encode a byte — previously this produced a garbage byte).
fn decode_base64(input: &str) -> Result<Vec<u8>, String> {
    // Map one base64 digit to its 6-bit value; '=' decodes as 0 and is
    // excluded from the output by the length/padding checks below.
    let table = |c: u8| -> Result<u8, String> {
        match c {
            b'A'..=b'Z' => Ok(c - b'A'),
            b'a'..=b'z' => Ok(c - b'a' + 26),
            b'0'..=b'9' => Ok(c - b'0' + 52),
            b'+' => Ok(62),
            b'/' => Ok(63),
            b'=' => Ok(0), // padding
            _ => Err(format!("Invalid base64 character: {}", c as char)),
        }
    };

    let bytes: Vec<u8> = input
        .bytes()
        .filter(|&b| b != b'\n' && b != b'\r' && b != b' ' && b != b'\t')
        .collect();
    let mut out = Vec::with_capacity(bytes.len() * 3 / 4);

    for chunk in bytes.chunks(4) {
        if chunk.len() == 1 {
            return Err("Truncated base64 input: dangling character".into());
        }
        let b0 = table(chunk[0])?;
        let b1 = table(chunk[1])?;
        let b2 = if chunk.len() > 2 { table(chunk[2])? } else { 0 };
        let b3 = if chunk.len() > 3 { table(chunk[3])? } else { 0 };

        // First output byte always exists for a >=2-digit chunk.
        out.push((b0 << 2) | (b1 >> 4));
        // Second output byte only when the third digit is a real digit.
        if chunk.len() > 2 && chunk[2] != b'=' {
            out.push((b1 << 4) | (b2 >> 2));
        }
        // Third output byte only when the fourth digit is a real digit.
        if chunk.len() > 3 && chunk[3] != b'=' {
            out.push((b2 << 6) | b3);
        }
    }
    Ok(out)
}
|
||||
|
||||
/// Walk every mesh/primitive in the glTF document and assemble `GltfMesh`es.
///
/// One `GltfMesh` is produced per primitive (primitives may carry different
/// materials). Missing optional attributes get defaults: +Y normals, zero
/// UVs, and tangents computed from geometry when the file provides none.
fn extract_meshes(json: &JsonValue, buffers: &[Vec<u8>]) -> Result<GltfData, String> {
    // Shared empty array so missing top-level sections read as "no entries".
    let empty_arr: Vec<JsonValue> = Vec::new();
    let accessors = json.get("accessors").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
    let buffer_views = json.get("bufferViews").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
    let materials_json = json.get("materials").and_then(|v| v.as_array());

    let mut meshes = Vec::new();

    let mesh_list = json.get("meshes").and_then(|v| v.as_array())
        .ok_or("No meshes in glTF")?;

    for mesh_val in mesh_list {
        let name = mesh_val.get("name").and_then(|v| v.as_str()).map(|s| s.to_string());
        let primitives = mesh_val.get("primitives").and_then(|v| v.as_array())
            .ok_or("Mesh has no primitives")?;

        for prim in primitives {
            let attrs = prim.get("attributes").and_then(|v| v.as_object())
                .ok_or("Primitive has no attributes")?;

            // Read position data (required)
            let pos_idx = attrs.iter().find(|(k, _)| k == "POSITION")
                .and_then(|(_, v)| v.as_u32())
                .ok_or("Missing POSITION attribute")? as usize;
            let positions = read_accessor_vec3(accessors, buffer_views, buffers, pos_idx)?;

            // Read normals (optional) — default to +Y when absent
            let normals = if let Some(idx) = attrs.iter().find(|(k, _)| k == "NORMAL").and_then(|(_, v)| v.as_u32()) {
                read_accessor_vec3(accessors, buffer_views, buffers, idx as usize)?
            } else {
                vec![[0.0, 1.0, 0.0]; positions.len()]
            };

            // Read UVs (optional) — default to (0,0) when absent
            let uvs = if let Some(idx) = attrs.iter().find(|(k, _)| k == "TEXCOORD_0").and_then(|(_, v)| v.as_u32()) {
                read_accessor_vec2(accessors, buffer_views, buffers, idx as usize)?
            } else {
                vec![[0.0, 0.0]; positions.len()]
            };

            // Read tangents (optional); None triggers computation below
            let tangents = if let Some(idx) = attrs.iter().find(|(k, _)| k == "TANGENT").and_then(|(_, v)| v.as_u32()) {
                Some(read_accessor_vec4(accessors, buffer_views, buffers, idx as usize)?)
            } else {
                None
            };

            // Read indices
            let indices = if let Some(idx) = prim.get("indices").and_then(|v| v.as_u32()) {
                read_accessor_indices(accessors, buffer_views, buffers, idx as usize)?
            } else {
                // No indices — generate sequential (non-indexed geometry)
                (0..positions.len() as u32).collect()
            };

            // Assemble vertices; tangent defaults to zero until computed
            let mut vertices: Vec<MeshVertex> = Vec::with_capacity(positions.len());
            for i in 0..positions.len() {
                vertices.push(MeshVertex {
                    position: positions[i],
                    normal: normals[i],
                    uv: uvs[i],
                    tangent: tangents.as_ref().map_or([0.0; 4], |t| t[i]),
                });
            }

            // Compute tangents if not provided
            if tangents.is_none() {
                compute_tangents(&mut vertices, &indices);
            }

            // Read material: index into the top-level materials array.
            // The `?` inside the closure bails to None when there is no
            // materials array at all.
            let material = prim.get("material")
                .and_then(|v| v.as_u32())
                .and_then(|idx| materials_json?.get(idx as usize))
                .and_then(|mat| extract_material(mat));

            meshes.push(GltfMesh { vertices, indices, name: name.clone(), material });
        }
    }

    Ok(GltfData { meshes })
}
|
||||
|
||||
/// Resolve an accessor to its backing byte buffer and the combined
/// (bufferView byteOffset + accessor byteOffset) starting offset.
///
/// NOTE(review): the bufferView's `byteStride` is not consulted — callers
/// assume tightly packed data. Confirm inputs are non-interleaved, or add
/// stride handling.
fn get_buffer_data<'a>(
    accessor: &JsonValue,
    buffer_views: &[JsonValue],
    buffers: &'a [Vec<u8>],
) -> Result<(&'a [u8], usize), String> {
    let bv_idx = accessor.get("bufferView").and_then(|v| v.as_u32())
        .ok_or("Accessor missing bufferView")? as usize;
    let bv = buffer_views.get(bv_idx).ok_or("BufferView index out of range")?;
    // glTF defaults when the keys are absent: buffer 0, offsets 0.
    let buf_idx = bv.get("buffer").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
    let bv_offset = bv.get("byteOffset").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
    let acc_offset = accessor.get("byteOffset").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
    let buffer = buffers.get(buf_idx).ok_or("Buffer index out of range")?;
    let offset = bv_offset + acc_offset;
    Ok((buffer, offset))
}
|
||||
|
||||
fn read_accessor_vec3(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 3]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 12;
|
||||
if o + 12 > buffer.len() { return Err("Buffer overflow reading vec3".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
let z = f32::from_le_bytes([buffer[o+8], buffer[o+9], buffer[o+10], buffer[o+11]]);
|
||||
result.push([x, y, z]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_vec2(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 2]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 8;
|
||||
if o + 8 > buffer.len() { return Err("Buffer overflow reading vec2".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
result.push([x, y]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_vec4(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 4]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 16;
|
||||
if o + 16 > buffer.len() { return Err("Buffer overflow reading vec4".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
let z = f32::from_le_bytes([buffer[o+8], buffer[o+9], buffer[o+10], buffer[o+11]]);
|
||||
let w = f32::from_le_bytes([buffer[o+12], buffer[o+13], buffer[o+14], buffer[o+15]]);
|
||||
result.push([x, y, z, w]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Read an index accessor, widening every index to u32.
///
/// Supported glTF componentType constants: 5121 (UNSIGNED_BYTE),
/// 5123 (UNSIGNED_SHORT), 5125 (UNSIGNED_INT); anything else errors.
fn read_accessor_indices(
    accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
) -> Result<Vec<u32>, String> {
    let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
    let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
    let comp_type = acc.get("componentType").and_then(|v| v.as_u32()).ok_or("Missing componentType")?;
    let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;

    let mut result = Vec::with_capacity(count);
    match comp_type {
        5121 => { // UNSIGNED_BYTE
            for i in 0..count {
                if offset + i >= buffer.len() { return Err("Buffer overflow reading u8 indices".into()); }
                result.push(buffer[offset + i] as u32);
            }
        }
        5123 => { // UNSIGNED_SHORT
            for i in 0..count {
                let o = offset + i * 2;
                if o + 2 > buffer.len() { return Err("Buffer overflow reading u16 indices".into()); }
                result.push(u16::from_le_bytes([buffer[o], buffer[o+1]]) as u32);
            }
        }
        5125 => { // UNSIGNED_INT
            for i in 0..count {
                let o = offset + i * 4;
                if o + 4 > buffer.len() { return Err("Buffer overflow reading u32 indices".into()); }
                result.push(u32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]));
            }
        }
        _ => return Err(format!("Unsupported index component type: {}", comp_type)),
    }
    Ok(result)
}
|
||||
|
||||
fn extract_material(mat: &JsonValue) -> Option<GltfMaterial> {
|
||||
let pbr = mat.get("pbrMetallicRoughness")?;
|
||||
let base_color = if let Some(arr) = pbr.get("baseColorFactor").and_then(|v| v.as_array()) {
|
||||
[
|
||||
arr.get(0).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(3).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
]
|
||||
} else {
|
||||
[1.0, 1.0, 1.0, 1.0]
|
||||
};
|
||||
let metallic = pbr.get("metallicFactor").and_then(|v| v.as_f64()).unwrap_or(1.0) as f32;
|
||||
let roughness = pbr.get("roughnessFactor").and_then(|v| v.as_f64()).unwrap_or(1.0) as f32;
|
||||
Some(GltfMaterial { base_color, metallic, roughness })
|
||||
}
|
||||
|
||||
// Helper functions for tests
/// Decode `count` consecutive little-endian f32 values starting at `offset`.
/// Panics (like slice indexing) if the requested span exceeds the buffer.
#[allow(dead_code)]
fn read_floats(buffer: &[u8], offset: usize, count: usize) -> Vec<f32> {
    buffer[offset..offset + count * 4]
        .chunks_exact(4)
        .map(|c| f32::from_le_bytes([c[0], c[1], c[2], c[3]]))
        .collect()
}
|
||||
|
||||
/// Decode `count` consecutive little-endian u16 values starting at `offset`,
/// widened to u32. Panics (like slice indexing) on an out-of-range span.
#[allow(dead_code)]
fn read_indices_u16(buffer: &[u8], offset: usize, count: usize) -> Vec<u32> {
    buffer[offset..offset + count * 2]
        .chunks_exact(2)
        .map(|c| u16::from_le_bytes([c[0], c[1]]) as u32)
        .collect()
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for the GLB/glTF parser: header validation, base64 decoding,
    // accessor reading helpers, and end-to-end parsing of hand-built GLB blobs.
    use super::*;

    #[test]
    fn test_glb_header_magic() {
        // Invalid magic
        let data = [0u8; 12];
        assert!(parse_gltf(&data).is_err());
    }

    #[test]
    fn test_glb_header_version() {
        // Valid magic but wrong version
        let mut data = Vec::new();
        data.extend_from_slice(&0x46546C67u32.to_le_bytes()); // magic "glTF"
        data.extend_from_slice(&1u32.to_le_bytes()); // version 1 (we need 2)
        data.extend_from_slice(&12u32.to_le_bytes()); // length
        assert!(parse_gltf(&data).is_err());
    }

    #[test]
    fn test_base64_decode() {
        let encoded = "SGVsbG8="; // "Hello"
        let decoded = decode_base64(encoded).unwrap();
        assert_eq!(decoded, b"Hello");
    }

    #[test]
    fn test_base64_decode_no_padding() {
        // Decoder must tolerate missing '=' padding.
        let encoded = "SGVsbG8"; // "Hello" without padding
        let decoded = decode_base64(encoded).unwrap();
        assert_eq!(decoded, b"Hello");
    }

    #[test]
    fn test_read_f32_accessor() {
        // Simulate a buffer with 3 float32 values
        let buffer: Vec<u8> = [1.0f32, 2.0, 3.0].iter()
            .flat_map(|f| f.to_le_bytes())
            .collect();
        let data = read_floats(&buffer, 0, 3);
        assert_eq!(data, vec![1.0, 2.0, 3.0]);
    }

    #[test]
    fn test_read_u16_indices() {
        let buffer: Vec<u8> = [0u16, 1, 2].iter()
            .flat_map(|i| i.to_le_bytes())
            .collect();
        let indices = read_indices_u16(&buffer, 0, 3);
        assert_eq!(indices, vec![0u32, 1, 2]);
    }

    #[test]
    fn test_parse_minimal_glb() {
        let glb = build_minimal_glb_triangle();
        let data = parse_gltf(&glb).unwrap();
        assert_eq!(data.meshes.len(), 1);
        let mesh = &data.meshes[0];
        assert_eq!(mesh.vertices.len(), 3);
        assert_eq!(mesh.indices.len(), 3);
        // Verify positions
        assert_eq!(mesh.vertices[0].position, [0.0, 0.0, 0.0]);
        assert_eq!(mesh.vertices[1].position, [1.0, 0.0, 0.0]);
        assert_eq!(mesh.vertices[2].position, [0.0, 1.0, 0.0]);
    }

    #[test]
    fn test_parse_glb_with_material() {
        let glb = build_glb_with_material();
        let data = parse_gltf(&glb).unwrap();
        let mesh = &data.meshes[0];
        let mat = mesh.material.as_ref().unwrap();
        // Approximate comparisons: factors round-trip through JSON floats.
        assert!((mat.base_color[0] - 1.0).abs() < 0.01);
        assert!((mat.metallic - 0.5).abs() < 0.01);
        assert!((mat.roughness - 0.8).abs() < 0.01);
    }

    /// Build a minimal GLB with one triangle.
    /// Layout: 12-byte header, JSON chunk (space-padded to 4 bytes),
    /// BIN chunk (positions then u16 indices, padded to 4 bytes).
    fn build_minimal_glb_triangle() -> Vec<u8> {
        // Binary buffer: 3 positions (vec3) + 3 indices (u16)
        let mut bin = Vec::new();
        // Positions: 3 * vec3 = 36 bytes
        for &v in &[0.0f32, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0] {
            bin.extend_from_slice(&v.to_le_bytes());
        }
        // Indices: 3 * u16 = 6 bytes + 2 padding = 8 bytes
        for &i in &[0u16, 1, 2] {
            bin.extend_from_slice(&i.to_le_bytes());
        }
        bin.extend_from_slice(&[0, 0]); // padding to 4-byte alignment

        // componentType 5126 = FLOAT, 5123 = UNSIGNED_SHORT.
        let json_str = format!(r#"{{
            "asset": {{"version": "2.0"}},
            "buffers": [{{"byteLength": {}}}],
            "bufferViews": [
                {{"buffer": 0, "byteOffset": 0, "byteLength": 36}},
                {{"buffer": 0, "byteOffset": 36, "byteLength": 6}}
            ],
            "accessors": [
                {{"bufferView": 0, "componentType": 5126, "count": 3, "type": "VEC3",
                  "max": [1.0, 1.0, 0.0], "min": [0.0, 0.0, 0.0]}},
                {{"bufferView": 1, "componentType": 5123, "count": 3, "type": "SCALAR"}}
            ],
            "meshes": [{{
                "name": "Triangle",
                "primitives": [{{
                    "attributes": {{"POSITION": 0}},
                    "indices": 1
                }}]
            }}]
        }}"#, bin.len());

        let json_bytes = json_str.as_bytes();
        // Pad JSON to 4-byte alignment
        let json_padded_len = (json_bytes.len() + 3) & !3;
        let mut json_padded = json_bytes.to_vec();
        while json_padded.len() < json_padded_len {
            json_padded.push(b' ');
        }

        let total_len = 12 + 8 + json_padded.len() + 8 + bin.len();
        let mut glb = Vec::with_capacity(total_len);

        // Header
        glb.extend_from_slice(&0x46546C67u32.to_le_bytes()); // magic
        glb.extend_from_slice(&2u32.to_le_bytes()); // version
        glb.extend_from_slice(&(total_len as u32).to_le_bytes());

        // JSON chunk
        glb.extend_from_slice(&(json_padded.len() as u32).to_le_bytes());
        glb.extend_from_slice(&0x4E4F534Au32.to_le_bytes()); // "JSON"
        glb.extend_from_slice(&json_padded);

        // BIN chunk
        glb.extend_from_slice(&(bin.len() as u32).to_le_bytes());
        glb.extend_from_slice(&0x004E4942u32.to_le_bytes()); // "BIN\0"
        glb.extend_from_slice(&bin);

        glb
    }

    /// Build a GLB with one triangle and a material.
    /// Same layout as build_minimal_glb_triangle, plus a pbrMetallicRoughness
    /// material referenced by the primitive.
    fn build_glb_with_material() -> Vec<u8> {
        let mut bin = Vec::new();
        for &v in &[0.0f32, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0] {
            bin.extend_from_slice(&v.to_le_bytes());
        }
        for &i in &[0u16, 1, 2] {
            bin.extend_from_slice(&i.to_le_bytes());
        }
        bin.extend_from_slice(&[0, 0]); // padding

        let json_str = format!(r#"{{
            "asset": {{"version": "2.0"}},
            "buffers": [{{"byteLength": {}}}],
            "bufferViews": [
                {{"buffer": 0, "byteOffset": 0, "byteLength": 36}},
                {{"buffer": 0, "byteOffset": 36, "byteLength": 6}}
            ],
            "accessors": [
                {{"bufferView": 0, "componentType": 5126, "count": 3, "type": "VEC3",
                  "max": [1.0, 1.0, 0.0], "min": [0.0, 0.0, 0.0]}},
                {{"bufferView": 1, "componentType": 5123, "count": 3, "type": "SCALAR"}}
            ],
            "materials": [{{
                "pbrMetallicRoughness": {{
                    "baseColorFactor": [1.0, 0.0, 0.0, 1.0],
                    "metallicFactor": 0.5,
                    "roughnessFactor": 0.8
                }}
            }}],
            "meshes": [{{
                "name": "Triangle",
                "primitives": [{{
                    "attributes": {{"POSITION": 0}},
                    "indices": 1,
                    "material": 0
                }}]
            }}]
        }}"#, bin.len());

        let json_bytes = json_str.as_bytes();
        let json_padded_len = (json_bytes.len() + 3) & !3;
        let mut json_padded = json_bytes.to_vec();
        while json_padded.len() < json_padded_len {
            json_padded.push(b' ');
        }

        let total_len = 12 + 8 + json_padded.len() + 8 + bin.len();
        let mut glb = Vec::with_capacity(total_len);

        glb.extend_from_slice(&0x46546C67u32.to_le_bytes());
        glb.extend_from_slice(&2u32.to_le_bytes());
        glb.extend_from_slice(&(total_len as u32).to_le_bytes());

        glb.extend_from_slice(&(json_padded.len() as u32).to_le_bytes());
        glb.extend_from_slice(&0x4E4F534Au32.to_le_bytes());
        glb.extend_from_slice(&json_padded);

        glb.extend_from_slice(&(bin.len() as u32).to_le_bytes());
        glb.extend_from_slice(&0x004E4942u32.to_le_bytes());
        glb.extend_from_slice(&bin);

        glb
    }
}
|
||||
@@ -9,6 +9,7 @@ pub struct IblResources {
|
||||
}
|
||||
|
||||
impl IblResources {
|
||||
/// CPU fallback: generates the BRDF LUT on the CPU and uploads as Rgba8Unorm.
|
||||
pub fn new(device: &wgpu::Device, queue: &wgpu::Queue) -> Self {
|
||||
let size = BRDF_LUT_SIZE;
|
||||
|
||||
@@ -79,4 +80,120 @@ impl IblResources {
|
||||
}
|
||||
}
|
||||
|
||||
    /// GPU compute path: generates the BRDF LUT via a compute shader in Rg16Float format.
    /// Higher precision than the CPU Rgba8Unorm path.
    ///
    /// Records a single compute pass over the whole LUT and submits it before
    /// returning; the texture is ready once the queue executes the submission.
    /// NOTE(review): field names `brdf_lut_*` must match the struct definition
    /// above this view.
    pub fn new_gpu(device: &wgpu::Device, queue: &wgpu::Queue) -> Self {
        let size = BRDF_LUT_SIZE;

        let extent = wgpu::Extent3d {
            width: size,
            height: size,
            depth_or_array_layers: 1,
        };

        // Create Rg16Float storage texture.
        // STORAGE_BINDING lets the compute shader write it; TEXTURE_BINDING
        // lets later render passes sample it.
        let brdf_lut_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("BrdfLutTexture_GPU"),
            size: extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rg16Float,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING,
            view_formats: &[],
        });

        // View used as the write-only storage binding during generation.
        let storage_view =
            brdf_lut_texture.create_view(&wgpu::TextureViewDescriptor::default());

        // Create compute pipeline
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("BRDF LUT Compute Shader"),
            source: wgpu::ShaderSource::Wgsl(
                include_str!("brdf_lut_compute.wgsl").into(),
            ),
        });

        // Single binding: the LUT as a write-only storage texture (binding 0).
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("BRDF LUT Compute BGL"),
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture {
                        access: wgpu::StorageTextureAccess::WriteOnly,
                        format: wgpu::TextureFormat::Rg16Float,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                }],
            });

        let pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("BRDF LUT Compute Pipeline Layout"),
                bind_group_layouts: &[&bind_group_layout],
                immediate_size: 0,
            });

        let pipeline =
            device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                label: Some("BRDF LUT Compute Pipeline"),
                layout: Some(&pipeline_layout),
                module: &shader,
                entry_point: Some("main"),
                compilation_options: wgpu::PipelineCompilationOptions::default(),
                cache: None,
            });

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("BRDF LUT Compute Bind Group"),
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&storage_view),
            }],
        });

        // Dispatch compute shader
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("BRDF LUT Compute Encoder"),
            });

        // Scope the pass so its borrow of `encoder` ends before finish().
        {
            let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
                label: Some("BRDF LUT Compute Pass"),
                timestamp_writes: None,
            });
            pass.set_pipeline(&pipeline);
            pass.set_bind_group(0, &bind_group, &[]);
            // Dispatch enough workgroups to cover size x size texels (16x16 per workgroup)
            // NOTE(review): the 16x16 workgroup size must match @workgroup_size
            // in brdf_lut_compute.wgsl — confirm against the shader.
            let wg_x = (size + 15) / 16;
            let wg_y = (size + 15) / 16;
            pass.dispatch_workgroups(wg_x, wg_y, 1);
        }

        queue.submit(std::iter::once(encoder.finish()));

        // Separate view for sampling at use sites.
        let brdf_lut_view =
            brdf_lut_texture.create_view(&wgpu::TextureViewDescriptor::default());

        // Clamp + bilinear: standard addressing for a [0,1]^2 lookup table.
        let brdf_lut_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("BrdfLutSampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::MipmapFilterMode::Nearest,
            ..Default::default()
        });

        Self {
            brdf_lut_texture,
            brdf_lut_view,
            brdf_lut_sampler,
        }
    }
|
||||
}
|
||||
|
||||
959
crates/voltex_renderer/src/jpg.rs
Normal file
959
crates/voltex_renderer/src/jpg.rs
Normal file
@@ -0,0 +1,959 @@
|
||||
/// Baseline JPEG decoder. Supports SOF0 (sequential DCT, Huffman).
/// Returns RGBA pixel data like parse_png.
/// Supports grayscale (1-component) and YCbCr (3-component) with
/// chroma subsampling (4:4:4, 4:2:2, 4:2:0).
///
/// Returns `(rgba_bytes, width, height)` on success.
///
/// # Errors
/// Returns a descriptive `String` error for a missing SOI marker, truncated
/// segments, SOS before SOF, or a malformed entropy-coded stream.
pub fn parse_jpg(data: &[u8]) -> Result<(Vec<u8>, u32, u32), String> {
    if data.len() < 2 || data[0] != 0xFF || data[1] != 0xD8 {
        return Err("Invalid JPEG: missing SOI marker".into());
    }

    // Decoder state accumulated while walking the marker segments.
    let mut pos = 2;
    let mut width: u16 = 0;
    let mut height: u16 = 0;
    let mut num_components: u8 = 0;
    let mut components: Vec<JpegComponent> = Vec::new();
    let mut qt_tables: [[u16; 64]; 4] = [[0; 64]; 4];
    let mut dc_tables: [Option<HuffTable>; 4] = [None, None, None, None];
    let mut ac_tables: [Option<HuffTable>; 4] = [None, None, None, None];
    let mut found_sof = false;
    let mut restart_interval: u16 = 0;

    // Marker loop: every segment starts with 0xFF followed by a marker byte.
    while pos + 1 < data.len() {
        if data[pos] != 0xFF {
            return Err(format!("Expected marker at position {}", pos));
        }
        // Skip padding 0xFF bytes
        while pos + 1 < data.len() && data[pos + 1] == 0xFF {
            pos += 1;
        }
        if pos + 1 >= data.len() {
            return Err("Unexpected end of data".into());
        }
        let marker = data[pos + 1];
        pos += 2;

        match marker {
            0xD8 => {} // SOI (already handled)
            0xD9 => break, // EOI
            0xDA => {
                // SOS — Start of Scan
                if !found_sof {
                    return Err("SOS before SOF".into());
                }
                let (rgb, scan_end) = decode_scan(
                    data, pos, width, height, num_components,
                    &components, &qt_tables, &dc_tables, &ac_tables,
                    restart_interval,
                )?;
                // Only a single (baseline) scan is decoded, so the position
                // where the scan ended is unused.
                let _ = scan_end;
                // Convert RGB to RGBA
                let w = width as u32;
                let h = height as u32;
                let mut rgba = Vec::with_capacity((w * h * 4) as usize);
                for pixel in rgb.chunks_exact(3) {
                    rgba.push(pixel[0]);
                    rgba.push(pixel[1]);
                    rgba.push(pixel[2]);
                    rgba.push(255);
                }
                return Ok((rgba, w, h));
            }
            0xC0 => {
                // SOF0 — Baseline DCT
                let (sof, len) = parse_sof(data, pos)?;
                width = sof.width;
                height = sof.height;
                num_components = sof.num_components;
                components = sof.components;
                found_sof = true;
                pos += len;
            }
            0xC4 => {
                // DHT — Define Huffman Table
                let len = parse_dht(data, pos, &mut dc_tables, &mut ac_tables)?;
                pos += len;
            }
            0xDB => {
                // DQT — Define Quantization Table
                let len = parse_dqt(data, pos, &mut qt_tables)?;
                pos += len;
            }
            0xDD => {
                // DRI — Define Restart Interval
                if pos + 4 > data.len() {
                    return Err("DRI too short".into());
                }
                let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
                restart_interval =
                    u16::from_be_bytes([data[pos + 2], data[pos + 3]]);
                pos += seg_len;
            }
            0xD0..=0xD7 => {
                // RST markers — handled inside scan decoder
            }
            0xE0..=0xEF | 0xFE => {
                // APP0-APP15, COM — skip
                if pos + 2 > data.len() {
                    return Err("Segment too short".into());
                }
                let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
                pos += seg_len;
            }
            _ => {
                // Unknown marker with length — skip
                // (this also skips progressive SOF2 etc., which later fails
                // with "SOS before SOF" since found_sof stays false)
                if pos + 2 > data.len() {
                    return Err(format!("Unknown marker 0x{:02X}", marker));
                }
                let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
                pos += seg_len;
            }
        }
    }

    Err("No image data found (missing SOS)".into())
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Data structures
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Per-component parameters from the SOF header, plus Huffman table
/// selectors that are filled in later from the SOS header.
#[derive(Clone)]
struct JpegComponent {
    #[allow(dead_code)]
    id: u8,        // component identifier byte from SOF (kept for debugging)
    h_sample: u8,  // horizontal sampling factor (from SOF)
    v_sample: u8,  // vertical sampling factor (from SOF)
    qt_id: u8,     // quantization table selector (index into qt_tables)
    dc_table: u8,  // DC Huffman table selector (set by decode_scan from SOS)
    ac_table: u8,  // AC Huffman table selector (set by decode_scan from SOS)
}
|
||||
|
||||
/// Frame parameters parsed from an SOF0 segment.
struct SofData {
    width: u16,          // image width in pixels
    height: u16,         // image height in pixels
    num_components: u8,  // 1 = grayscale, 3 = YCbCr
    components: Vec<JpegComponent>,
}
|
||||
|
||||
/// Canonical Huffman table in the form used by JPEG Annex F decoding:
/// per-code-length mincode/maxcode ranges plus an offset into `symbols`.
/// Index i in the fixed arrays corresponds to code length i+1 bits.
struct HuffTable {
    symbols: Vec<u8>,    // decoded symbol values, ordered by code length
    offsets: [u16; 17],  // start index in `symbols` for each code length
    maxcode: [i32; 17],  // largest code of each length (-1 = no codes)
    mincode: [u16; 17],  // smallest code of each length
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Marker parsers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse a DQT (Define Quantization Table) segment starting at `pos`
/// (the first byte of the segment length field) and store each table
/// into `qt_tables` by its table id. A single segment may define
/// several tables, each 8-bit or 16-bit precision.
///
/// Returns the segment length (so the caller can advance past it).
///
/// # Errors
/// Returns an error when the segment is truncated or a table id is >= 4.
fn parse_dqt(data: &[u8], pos: usize, qt_tables: &mut [[u16; 64]; 4]) -> Result<usize, String> {
    if pos + 2 > data.len() {
        return Err("DQT too short".into());
    }
    let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
    if pos + seg_len > data.len() {
        return Err("DQT segment extends past data".into());
    }

    let seg_end = pos + seg_len;
    let mut cursor = pos + 2;
    while cursor < seg_end {
        // Pq/Tq byte: high nibble = precision (0 => 8-bit), low nibble = table id.
        let pq_tq = data[cursor];
        cursor += 1;
        let wide = (pq_tq >> 4) != 0;
        let id = (pq_tq & 0x0F) as usize;
        if id >= 4 {
            return Err(format!("DQT table id {} out of range", id));
        }

        if wide {
            // 16-bit entries, big-endian, 128 bytes total.
            if cursor + 128 > seg_end {
                return Err("DQT 16-bit data too short".into());
            }
            for (i, pair) in data[cursor..cursor + 128].chunks_exact(2).enumerate() {
                qt_tables[id][i] = u16::from_be_bytes([pair[0], pair[1]]);
            }
            cursor += 128;
        } else {
            // 8-bit entries, 64 bytes total.
            if cursor + 64 > seg_end {
                return Err("DQT 8-bit data too short".into());
            }
            for (i, &b) in data[cursor..cursor + 64].iter().enumerate() {
                qt_tables[id][i] = b as u16;
            }
            cursor += 64;
        }
    }
    Ok(seg_len)
}
|
||||
|
||||
fn parse_sof(data: &[u8], pos: usize) -> Result<(SofData, usize), String> {
|
||||
if pos + 2 > data.len() {
|
||||
return Err("SOF too short".into());
|
||||
}
|
||||
let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
|
||||
if pos + seg_len > data.len() {
|
||||
return Err("SOF segment extends past data".into());
|
||||
}
|
||||
|
||||
let precision = data[pos + 2];
|
||||
if precision != 8 {
|
||||
return Err(format!("Unsupported sample precision: {}", precision));
|
||||
}
|
||||
|
||||
let height = u16::from_be_bytes([data[pos + 3], data[pos + 4]]);
|
||||
let width = u16::from_be_bytes([data[pos + 5], data[pos + 6]]);
|
||||
let num_comp = data[pos + 7];
|
||||
|
||||
let mut components = Vec::new();
|
||||
let mut off = pos + 8;
|
||||
for _ in 0..num_comp {
|
||||
if off + 3 > pos + seg_len {
|
||||
return Err("SOF component data too short".into());
|
||||
}
|
||||
let id = data[off];
|
||||
let sampling = data[off + 1];
|
||||
let h_sample = sampling >> 4;
|
||||
let v_sample = sampling & 0x0F;
|
||||
let qt_id = data[off + 2];
|
||||
components.push(JpegComponent {
|
||||
id,
|
||||
h_sample,
|
||||
v_sample,
|
||||
qt_id,
|
||||
dc_table: 0,
|
||||
ac_table: 0,
|
||||
});
|
||||
off += 3;
|
||||
}
|
||||
|
||||
Ok((
|
||||
SofData {
|
||||
width,
|
||||
height,
|
||||
num_components: num_comp,
|
||||
components,
|
||||
},
|
||||
seg_len,
|
||||
))
|
||||
}
|
||||
|
||||
/// Parse a DHT (Define Huffman Table) segment at `pos` and install the
/// resulting canonical Huffman tables into `dc_tables` / `ac_tables`
/// (selected by the table class nibble). A segment may define several tables.
///
/// Returns the segment length so the caller can advance past it.
///
/// # Errors
/// Returns an error when the segment is truncated or a table id is >= 4.
fn parse_dht(
    data: &[u8],
    pos: usize,
    dc_tables: &mut [Option<HuffTable>; 4],
    ac_tables: &mut [Option<HuffTable>; 4],
) -> Result<usize, String> {
    if pos + 2 > data.len() {
        return Err("DHT too short".into());
    }
    let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
    if pos + seg_len > data.len() {
        return Err("DHT segment extends past data".into());
    }

    let mut off = pos + 2;
    let seg_end = pos + seg_len;
    while off < seg_end {
        // Tc/Th byte: high nibble = class (0 = DC, 1 = AC), low nibble = table id.
        let tc_th = data[off];
        let table_class = tc_th >> 4;
        let table_id = (tc_th & 0x0F) as usize;
        off += 1;
        if table_id >= 4 {
            return Err(format!("DHT table id {} out of range", table_id));
        }

        // 16 bytes: number of codes of each length 1..=16.
        if off + 16 > seg_end {
            return Err("DHT counts too short".into());
        }
        let mut counts = [0u8; 16];
        counts.copy_from_slice(&data[off..off + 16]);
        off += 16;

        let total_symbols: usize = counts.iter().map(|&c| c as usize).sum();
        if off + total_symbols > seg_end {
            return Err("DHT symbols too short".into());
        }

        let symbols: Vec<u8> = data[off..off + total_symbols].to_vec();
        off += total_symbols;

        // Build lookup tables: canonical code assignment — codes of each
        // length are consecutive, and doubling (<< 1) moves to the next length.
        // Index i corresponds to code length i+1.
        let mut offsets = [0u16; 17];
        let mut maxcode = [-1i32; 17]; // -1 marks "no codes of this length"
        let mut mincode = [0u16; 17];
        let mut code: u16 = 0;
        let mut sym_offset: u16 = 0;

        for i in 0..16 {
            offsets[i] = sym_offset;
            if counts[i] > 0 {
                mincode[i] = code;
                maxcode[i] = (code + counts[i] as u16 - 1) as i32;
                sym_offset += counts[i] as u16;
            }
            code = (code + counts[i] as u16) << 1;
        }
        offsets[16] = sym_offset;

        let table = HuffTable {
            symbols,
            offsets,
            maxcode,
            mincode,
        };
        if table_class == 0 {
            dc_tables[table_id] = Some(table);
        } else {
            ac_tables[table_id] = Some(table);
        }
    }
    Ok(seg_len)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// BitReader — MSB-first bit reading with JPEG byte stuffing
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// MSB-first bit reader over JPEG entropy-coded scan data, transparently
/// handling 0xFF 0x00 byte stuffing (see `read_byte`).
struct BitReader<'a> {
    data: &'a [u8], // whole JPEG byte stream
    pos: usize,     // index of the next byte to consume
    bit_pos: u8,    // bits still unread in `current` (0 => refill needed)
    current: u8,    // byte currently being drained, MSB first
}
|
||||
|
||||
impl<'a> BitReader<'a> {
    /// Create a reader positioned at byte `start` of `data`.
    fn new(data: &'a [u8], start: usize) -> Self {
        Self {
            data,
            pos: start,
            bit_pos: 0,
            current: 0,
        }
    }

    /// Read one raw byte, un-stuffing 0xFF 0x00 sequences and skipping
    /// RST markers that appear inline in the entropy stream.
    fn read_byte(&mut self) -> Result<u8, String> {
        if self.pos >= self.data.len() {
            return Err("Unexpected end of scan data".into());
        }
        let byte = self.data[self.pos];
        self.pos += 1;
        if byte == 0xFF {
            if self.pos >= self.data.len() {
                return Err("Unexpected end after 0xFF".into());
            }
            let next = self.data[self.pos];
            if next == 0x00 {
                self.pos += 1; // skip stuffed 0x00
            } else if (0xD0..=0xD7).contains(&next) {
                // RST marker — skip marker byte and read next actual byte
                self.pos += 1;
                return self.read_byte();
            } else {
                // Any other marker (e.g. EOI) terminates the entropy data.
                return Err("Marker found in scan data".into());
            }
        }
        Ok(byte)
    }

    /// Refill `current` from the stream when all its bits are consumed.
    fn ensure_bits(&mut self) -> Result<(), String> {
        if self.bit_pos == 0 {
            self.current = self.read_byte()?;
            self.bit_pos = 8;
        }
        Ok(())
    }

    /// Read a single bit, most-significant first.
    fn read_bit(&mut self) -> Result<u8, String> {
        self.ensure_bits()?;
        self.bit_pos -= 1;
        Ok((self.current >> self.bit_pos) & 1)
    }

    /// Read `count` bits (count <= 16) as a big-endian value.
    fn read_bits(&mut self, count: u8) -> Result<u16, String> {
        let mut val: u16 = 0;
        for _ in 0..count {
            val = (val << 1) | self.read_bit()? as u16;
        }
        Ok(val)
    }

    /// Decode one Huffman symbol by extending the code bit-by-bit and
    /// checking it against each length's maxcode (JPEG Annex F DECODE).
    fn decode_huffman(&mut self, table: &HuffTable) -> Result<u8, String> {
        let mut code: u16 = 0;
        for len in 0..16 {
            code = (code << 1) | self.read_bit()? as u16;
            if table.maxcode[len] >= 0 && code as i32 <= table.maxcode[len] {
                let idx = table.offsets[len] as usize + (code - table.mincode[len]) as usize;
                return Ok(table.symbols[idx]);
            }
        }
        Err("Invalid Huffman code".into())
    }

    /// Skip to next byte-aligned position (and handle RST markers)
    fn align_to_byte(&mut self) {
        // Discarding bit_pos/current forces ensure_bits to refill from the
        // next stream byte.
        self.bit_pos = 0;
        self.current = 0;
    }

    /// Find and skip RST marker in the byte stream
    fn skip_to_rst_marker(&mut self) -> Result<(), String> {
        // Align to byte boundary
        self.align_to_byte();
        // Look for 0xFF 0xDn marker
        loop {
            if self.pos >= self.data.len() {
                return Err("Unexpected end looking for RST marker".into());
            }
            if self.data[self.pos] == 0xFF && self.pos + 1 < self.data.len() {
                let next = self.data[self.pos + 1];
                if (0xD0..=0xD7).contains(&next) {
                    self.pos += 2;
                    return Ok(());
                }
            }
            self.pos += 1;
        }
    }

    /// Current byte position in the stream (where decoding stopped).
    fn scan_end_pos(&self) -> usize {
        self.pos
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IDCT
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Zig-zag order for 8x8 block: maps a coefficient's position in the
/// entropy-coded (zig-zag) stream to its row-major index in the block.
const ZIGZAG: [usize; 64] = [
    0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27,
    20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
];
|
||||
|
||||
fn idct(coeffs: &[i32; 64]) -> [i32; 64] {
|
||||
let mut workspace = [0.0f64; 64];
|
||||
|
||||
// Arrange from zigzag to row-major
|
||||
let mut block = [0.0f64; 64];
|
||||
for i in 0..64 {
|
||||
block[ZIGZAG[i]] = coeffs[i] as f64;
|
||||
}
|
||||
|
||||
// 1D IDCT on rows
|
||||
for row in 0..8 {
|
||||
let off = row * 8;
|
||||
idct_1d(&mut block, off);
|
||||
}
|
||||
|
||||
// Transpose
|
||||
for r in 0..8 {
|
||||
for c in 0..8 {
|
||||
workspace[c * 8 + r] = block[r * 8 + c];
|
||||
}
|
||||
}
|
||||
|
||||
// 1D IDCT on columns (now rows after transpose)
|
||||
for row in 0..8 {
|
||||
let off = row * 8;
|
||||
idct_1d(&mut workspace, off);
|
||||
}
|
||||
|
||||
// Transpose back and round
|
||||
let mut result = [0i32; 64];
|
||||
for r in 0..8 {
|
||||
for c in 0..8 {
|
||||
result[r * 8 + c] = workspace[c * 8 + r].round() as i32;
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// In-place 1D inverse DCT over the 8 samples at `data[off..off + 8]`
/// (naive O(n^2) form): x[n] = (1/2) * sum_u C(u) X[u] cos((2n+1)u*pi/16),
/// with C(0) = 1/sqrt(2) and C(u) = 1 otherwise.
fn idct_1d(data: &mut [f64], off: usize) {
    use std::f64::consts::PI;
    let mut out = [0.0f64; 8];
    for (x, slot) in out.iter_mut().enumerate() {
        let mut acc = 0.0;
        for u in 0..8 {
            let cu = if u == 0 { 1.0 / 2.0f64.sqrt() } else { 1.0 };
            let angle = (2.0 * x as f64 + 1.0) * u as f64 * PI / 16.0;
            acc += cu * data[off + u] * angle.cos();
        }
        *slot = acc / 2.0;
    }
    data[off..off + 8].copy_from_slice(&out);
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Scan decoder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Decode the entropy-coded scan that follows an SOS marker at `pos`.
///
/// Parses the SOS header (Huffman table selectors per component), then
/// decodes MCU by MCU, honoring restart intervals, and writes the
/// color-converted image into an RGB buffer.
///
/// Returns `(rgb_bytes, position_after_scan_data)`.
#[allow(clippy::too_many_arguments)]
fn decode_scan(
    data: &[u8],
    pos: usize,
    width: u16,
    height: u16,
    num_components: u8,
    components: &[JpegComponent],
    qt_tables: &[[u16; 64]; 4],
    dc_tables: &[Option<HuffTable>; 4],
    ac_tables: &[Option<HuffTable>; 4],
    restart_interval: u16,
) -> Result<(Vec<u8>, usize), String> {
    // Parse SOS header
    if pos + 2 > data.len() {
        return Err("SOS too short".into());
    }
    let seg_len = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
    let ns = data[pos + 2] as usize;

    // Copy components so the SOS table selectors can be recorded per scan.
    let mut scan_components = components.to_vec();
    let mut off = pos + 3;
    for i in 0..ns {
        let _cs = data[off]; // component selector
        let td_ta = data[off + 1];
        scan_components[i].dc_table = td_ta >> 4;
        scan_components[i].ac_table = td_ta & 0x0F;
        off += 2;
    }
    let scan_data_start = pos + seg_len;

    let mut reader = BitReader::new(data, scan_data_start);

    // Calculate MCU dimensions
    let max_h = scan_components
        .iter()
        .take(num_components as usize)
        .map(|c| c.h_sample)
        .max()
        .unwrap_or(1);
    let max_v = scan_components
        .iter()
        .take(num_components as usize)
        .map(|c| c.v_sample)
        .max()
        .unwrap_or(1);
    let mcu_width = max_h as u16 * 8;
    let mcu_height = max_v as u16 * 8;
    // Ceiling division: partial MCUs at the right/bottom edges still decode.
    let mcus_x = (width + mcu_width - 1) / mcu_width;
    let mcus_y = (height + mcu_height - 1) / mcu_height;

    // One DC predictor per component (differential DC coding).
    let mut dc_pred = vec![0i32; num_components as usize];
    let mut rgb = vec![0u8; (width as usize) * (height as usize) * 3];

    let mut mcu_count: u16 = 0;

    for mcu_row in 0..mcus_y {
        for mcu_col in 0..mcus_x {
            // Handle restart interval
            if restart_interval > 0 && mcu_count > 0 && mcu_count % restart_interval == 0 {
                // Reset DC predictors
                for dc in dc_pred.iter_mut() {
                    *dc = 0;
                }
                reader.skip_to_rst_marker()?;
            }

            // Decoded blocks per component for this MCU.
            let mut mcu_blocks: Vec<Vec<[i32; 64]>> = Vec::new();

            for (ci, comp) in scan_components
                .iter()
                .enumerate()
                .take(num_components as usize)
            {
                // An MCU carries h_sample * v_sample blocks of each component.
                let blocks_h = comp.h_sample as usize;
                let blocks_v = comp.v_sample as usize;
                let mut blocks = Vec::with_capacity(blocks_h * blocks_v);

                for _ in 0..(blocks_h * blocks_v) {
                    let block = decode_block(
                        &mut reader,
                        dc_tables[comp.dc_table as usize]
                            .as_ref()
                            .ok_or("Missing DC Huffman table")?,
                        ac_tables[comp.ac_table as usize]
                            .as_ref()
                            .ok_or("Missing AC Huffman table")?,
                        &mut dc_pred[ci],
                        &qt_tables[comp.qt_id as usize],
                    )?;
                    blocks.push(block);
                }
                mcu_blocks.push(blocks);
            }

            // Upsample, level-shift and color-convert into the output buffer.
            assemble_mcu(
                &mcu_blocks,
                &scan_components,
                num_components,
                max_h,
                max_v,
                mcu_col as usize,
                mcu_row as usize,
                width as usize,
                height as usize,
                &mut rgb,
            );

            mcu_count = mcu_count.wrapping_add(1);
        }
    }

    Ok((rgb, reader.scan_end_pos()))
}
|
||||
|
||||
/// Decode one 8x8 block: differential DC + run-length AC Huffman decoding,
/// dequantization against `qt`, then inverse DCT.
///
/// `dc_pred` is the running DC predictor for this component; it is updated
/// in place with the decoded DC difference.
fn decode_block(
    reader: &mut BitReader,
    dc_table: &HuffTable,
    ac_table: &HuffTable,
    dc_pred: &mut i32,
    qt: &[u16; 64],
) -> Result<[i32; 64], String> {
    let mut coeffs = [0i32; 64];

    // DC coefficient
    let dc_len = reader.decode_huffman(dc_table)?;
    let dc_val = if dc_len > 0 {
        let bits = reader.read_bits(dc_len)? as i32;
        // JPEG magnitude coding: values with a 0 high bit are negative
        // (extend by subtracting 2^len - 1).
        if bits < (1 << (dc_len - 1)) {
            bits - (1 << dc_len) + 1
        } else {
            bits
        }
    } else {
        0
    };
    *dc_pred += dc_val;
    coeffs[0] = *dc_pred * qt[0] as i32;

    // AC coefficients
    let mut k = 1;
    while k < 64 {
        // rs packs (zero-run << 4) | magnitude-size.
        let rs = reader.decode_huffman(ac_table)?;
        let run = (rs >> 4) as usize;
        let size = (rs & 0x0F) as u8;

        if size == 0 {
            if run == 0 {
                break;
            } // EOB
            if run == 15 {
                k += 16;
                continue;
            } // ZRL (16 zeros)
            break;
        }

        k += run;
        if k >= 64 {
            break;
        }

        // Same magnitude/sign extension as the DC value.
        let bits = reader.read_bits(size)? as i32;
        let val = if bits < (1 << (size - 1)) {
            bits - (1 << size) + 1
        } else {
            bits
        };
        coeffs[k] = val * qt[k] as i32;
        k += 1;
    }

    Ok(idct(&coeffs))
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MCU assembly + color conversion
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Write one decoded MCU into the output RGB buffer: samples each component
/// (upsampling subsampled chroma), applies the +128 level shift, and converts
/// YCbCr to RGB (or replicates the single grayscale channel).
/// Pixels falling outside the image (partial edge MCUs) are skipped.
#[allow(clippy::too_many_arguments)]
fn assemble_mcu(
    mcu_blocks: &[Vec<[i32; 64]>],
    components: &[JpegComponent],
    num_components: u8,
    max_h: u8,
    max_v: u8,
    mcu_col: usize,
    mcu_row: usize,
    img_width: usize,
    img_height: usize,
    rgb: &mut [u8],
) {
    // Top-left pixel of this MCU in image coordinates.
    let mcu_px = mcu_col * max_h as usize * 8;
    let mcu_py = mcu_row * max_v as usize * 8;

    for py in 0..(max_v as usize * 8) {
        for px in 0..(max_h as usize * 8) {
            let x = mcu_px + px;
            let y = mcu_py + py;
            if x >= img_width || y >= img_height {
                continue;
            }

            if num_components == 1 {
                // Grayscale: IDCT output centered at 0, add 128 for level shift
                let val =
                    sample_component(&mcu_blocks[0], &components[0], max_h, max_v, px, py) + 128;
                let clamped = val.clamp(0, 255) as u8;
                let offset = (y * img_width + x) * 3;
                rgb[offset] = clamped;
                rgb[offset + 1] = clamped;
                rgb[offset + 2] = clamped;
            } else {
                // YCbCr -> RGB
                // IDCT output centered at 0; Y needs +128 level shift, Cb/Cr centered at 0 (128 subtracted)
                let yy = sample_component(&mcu_blocks[0], &components[0], max_h, max_v, px, py)
                    as f32
                    + 128.0;
                let cb = sample_component(&mcu_blocks[1], &components[1], max_h, max_v, px, py)
                    as f32;
                let cr = sample_component(&mcu_blocks[2], &components[2], max_h, max_v, px, py)
                    as f32;

                // Standard JFIF YCbCr -> RGB conversion coefficients.
                let r = (yy + 1.402 * cr).round().clamp(0.0, 255.0) as u8;
                let g = (yy - 0.344136 * cb - 0.714136 * cr).round().clamp(0.0, 255.0) as u8;
                let b = (yy + 1.772 * cb).round().clamp(0.0, 255.0) as u8;

                let offset = (y * img_width + x) * 3;
                rgb[offset] = r;
                rgb[offset + 1] = g;
                rgb[offset + 2] = b;
            }
        }
    }
}
|
||||
|
||||
fn sample_component(
|
||||
blocks: &[[i32; 64]],
|
||||
comp: &JpegComponent,
|
||||
max_h: u8,
|
||||
max_v: u8,
|
||||
px: usize,
|
||||
py: usize,
|
||||
) -> i32 {
|
||||
let scale_x = comp.h_sample as usize;
|
||||
let scale_y = comp.v_sample as usize;
|
||||
let cx = px * scale_x / (max_h as usize * 8);
|
||||
let cy = py * scale_y / (max_v as usize * 8);
|
||||
let bx = (px * scale_x / max_h as usize) % 8;
|
||||
let by = (py * scale_y / max_v as usize) % 8;
|
||||
let block_idx = cy * scale_x + cx;
|
||||
if block_idx < blocks.len() {
|
||||
blocks[block_idx][by * 8 + bx]
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_invalid_signature() {
|
||||
let data = [0u8; 10];
|
||||
assert!(parse_jpg(&data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_data() {
|
||||
assert!(parse_jpg(&[]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_soi_only() {
|
||||
let data = [0xFF, 0xD8];
|
||||
assert!(parse_jpg(&data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_dqt_8bit() {
|
||||
let mut seg = Vec::new();
|
||||
seg.extend_from_slice(&67u16.to_be_bytes()); // length = 67
|
||||
seg.push(0x00); // precision=0 (8-bit), table_id=0
|
||||
for i in 0..64u8 {
|
||||
seg.push(i + 1);
|
||||
}
|
||||
let mut qt_tables = [[0u16; 64]; 4];
|
||||
let len = parse_dqt(&seg, 0, &mut qt_tables).unwrap();
|
||||
assert_eq!(len, 67);
|
||||
assert_eq!(qt_tables[0][0], 1);
|
||||
assert_eq!(qt_tables[0][63], 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_dht() {
|
||||
let mut seg = Vec::new();
|
||||
let mut body = Vec::new();
|
||||
body.push(0x00); // class=0 (DC), id=0
|
||||
// counts: 1 symbol at length 1, 0 for lengths 2-16
|
||||
body.push(1);
|
||||
for _ in 1..16 {
|
||||
body.push(0);
|
||||
}
|
||||
body.push(0x05); // the symbol
|
||||
|
||||
seg.extend_from_slice(&((body.len() + 2) as u16).to_be_bytes());
|
||||
seg.extend_from_slice(&body);
|
||||
|
||||
let mut dc_tables: [Option<HuffTable>; 4] = [None, None, None, None];
|
||||
let mut ac_tables: [Option<HuffTable>; 4] = [None, None, None, None];
|
||||
let len = parse_dht(&seg, 0, &mut dc_tables, &mut ac_tables).unwrap();
|
||||
assert_eq!(len, seg.len());
|
||||
assert!(dc_tables[0].is_some());
|
||||
let table = dc_tables[0].as_ref().unwrap();
|
||||
assert_eq!(table.symbols[0], 0x05);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bit_reader_basic() {
|
||||
// 0xA5 = 10100101
|
||||
let data = [0xA5];
|
||||
let mut reader = BitReader::new(&data, 0);
|
||||
assert_eq!(reader.read_bits(1).unwrap(), 1);
|
||||
assert_eq!(reader.read_bits(1).unwrap(), 0);
|
||||
assert_eq!(reader.read_bits(3).unwrap(), 0b100);
|
||||
assert_eq!(reader.read_bits(3).unwrap(), 0b101);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bit_reader_byte_stuffing() {
|
||||
// JPEG byte stuffing: 0xFF 0x00 -> single 0xFF byte
|
||||
let data = [0xFF, 0x00, 0x80];
|
||||
let mut reader = BitReader::new(&data, 0);
|
||||
let val = reader.read_bits(8).unwrap();
|
||||
assert_eq!(val, 0xFF);
|
||||
let val2 = reader.read_bits(1).unwrap();
|
||||
assert_eq!(val2, 1); // 0x80 = 10000000
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_idct_dc_only() {
|
||||
let mut block = [0i32; 64];
|
||||
block[0] = 800; // after dequantization
|
||||
let result = idct(&block);
|
||||
let expected = 100; // 800/8 = 100
|
||||
for &v in &result {
|
||||
assert!(
|
||||
(v - expected).abs() <= 1,
|
||||
"DC-only IDCT: expected ~{}, got {}",
|
||||
expected,
|
||||
v
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_idct_known_values() {
|
||||
let mut block = [0i32; 64];
|
||||
block[0] = 640;
|
||||
block[1] = 100;
|
||||
let result = idct(&block);
|
||||
let avg: i32 = result.iter().sum::<i32>() / 64;
|
||||
assert!((avg - 80).abs() <= 2);
|
||||
}
|
||||
|
||||
/// Build a minimal valid 8x8 Baseline JPEG (grayscale) for testing.
|
||||
/// DC diff = 0 => Y = 0 => after +128 level shift => pixel = 128.
|
||||
fn build_minimal_jpeg_8x8() -> Vec<u8> {
|
||||
let mut out = Vec::new();
|
||||
|
||||
// SOI
|
||||
out.extend_from_slice(&[0xFF, 0xD8]);
|
||||
|
||||
// DQT — all-ones quantization table (id=0)
|
||||
out.extend_from_slice(&[0xFF, 0xDB]);
|
||||
let mut dqt = Vec::new();
|
||||
dqt.extend_from_slice(&0x0043u16.to_be_bytes()); // length = 67
|
||||
dqt.push(0x00); // 8-bit, table 0
|
||||
for _ in 0..64 {
|
||||
dqt.push(1);
|
||||
}
|
||||
out.extend_from_slice(&dqt);
|
||||
|
||||
// SOF0 — 8x8, 1 component (grayscale)
|
||||
out.extend_from_slice(&[0xFF, 0xC0]);
|
||||
out.extend_from_slice(&0x000Bu16.to_be_bytes()); // length = 11
|
||||
out.push(8); // precision
|
||||
out.extend_from_slice(&8u16.to_be_bytes()); // height
|
||||
out.extend_from_slice(&8u16.to_be_bytes()); // width
|
||||
out.push(1); // 1 component
|
||||
out.push(1); // component ID
|
||||
out.push(0x11); // h_sample=1, v_sample=1
|
||||
out.push(0); // qt table 0
|
||||
|
||||
// DHT — DC table (class=0, id=0): 1 symbol at length 1, symbol = 0x00
|
||||
out.extend_from_slice(&[0xFF, 0xC4]);
|
||||
let mut dht_body = Vec::new();
|
||||
dht_body.push(0x00); // DC, id=0
|
||||
dht_body.push(1); // 1 symbol at length 1
|
||||
for _ in 1..16 {
|
||||
dht_body.push(0);
|
||||
}
|
||||
dht_body.push(0x00); // symbol: category 0 (DC diff = 0)
|
||||
let dht_len = (dht_body.len() + 2) as u16;
|
||||
out.extend_from_slice(&dht_len.to_be_bytes());
|
||||
out.extend_from_slice(&dht_body);
|
||||
|
||||
// DHT — AC table (class=1, id=0): 1 symbol at length 1, symbol = 0x00 (EOB)
|
||||
out.extend_from_slice(&[0xFF, 0xC4]);
|
||||
let mut dht_ac = Vec::new();
|
||||
dht_ac.push(0x10); // AC, id=0
|
||||
dht_ac.push(1); // 1 symbol at length 1
|
||||
for _ in 1..16 {
|
||||
dht_ac.push(0);
|
||||
}
|
||||
dht_ac.push(0x00); // symbol: 0x00 = EOB
|
||||
let dht_ac_len = (dht_ac.len() + 2) as u16;
|
||||
out.extend_from_slice(&dht_ac_len.to_be_bytes());
|
||||
out.extend_from_slice(&dht_ac);
|
||||
|
||||
// SOS
|
||||
out.extend_from_slice(&[0xFF, 0xDA]);
|
||||
out.extend_from_slice(&0x0008u16.to_be_bytes()); // length=8
|
||||
out.push(1); // 1 component
|
||||
out.push(1); // component id=1
|
||||
out.push(0x00); // DC table 0, AC table 0
|
||||
out.push(0); // Ss
|
||||
out.push(63); // Se
|
||||
out.push(0); // Ah=0, Al=0
|
||||
|
||||
// Scan data: DC=0 (code=0, 1 bit), AC=EOB (code=0, 1 bit)
|
||||
// Bits: 0 (DC diff=0) + 0 (EOB) = 0b00 -> padded to byte: 0x00
|
||||
out.push(0x00);
|
||||
out.push(0x00);
|
||||
|
||||
// EOI
|
||||
out.extend_from_slice(&[0xFF, 0xD9]);
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_grayscale_flat() {
|
||||
let jpg_data = build_minimal_jpeg_8x8();
|
||||
let (rgba, w, h) = parse_jpg(&jpg_data).unwrap();
|
||||
assert_eq!(w, 8);
|
||||
assert_eq!(h, 8);
|
||||
assert_eq!(rgba.len(), 8 * 8 * 4);
|
||||
// Grayscale mid-gray: all pixels should be ~128
|
||||
for i in (0..rgba.len()).step_by(4) {
|
||||
assert_eq!(rgba[i], rgba[i + 1]); // R == G
|
||||
assert_eq!(rgba[i + 1], rgba[i + 2]); // G == B
|
||||
assert_eq!(rgba[i + 3], 255); // alpha
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_marker() {
|
||||
let data = [0xFF, 0xD8, 0x00]; // SOI then garbage
|
||||
assert!(parse_jpg(&data).is_err());
|
||||
}
|
||||
}
|
||||
297
crates/voltex_renderer/src/json_parser.rs
Normal file
297
crates/voltex_renderer/src/json_parser.rs
Normal file
@@ -0,0 +1,297 @@
|
||||
/// Minimal JSON parser for glTF. No external dependencies.
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum JsonValue {
|
||||
Null,
|
||||
Bool(bool),
|
||||
Number(f64),
|
||||
String(String),
|
||||
Array(Vec<JsonValue>),
|
||||
Object(Vec<(String, JsonValue)>), // preserve order
|
||||
}
|
||||
|
||||
impl JsonValue {
|
||||
pub fn as_object(&self) -> Option<&[(String, JsonValue)]> {
|
||||
match self { JsonValue::Object(v) => Some(v), _ => None }
|
||||
}
|
||||
pub fn as_array(&self) -> Option<&[JsonValue]> {
|
||||
match self { JsonValue::Array(v) => Some(v), _ => None }
|
||||
}
|
||||
pub fn as_str(&self) -> Option<&str> {
|
||||
match self { JsonValue::String(s) => Some(s), _ => None }
|
||||
}
|
||||
pub fn as_f64(&self) -> Option<f64> {
|
||||
match self { JsonValue::Number(n) => Some(*n), _ => None }
|
||||
}
|
||||
pub fn as_u32(&self) -> Option<u32> {
|
||||
self.as_f64().map(|n| n as u32)
|
||||
}
|
||||
pub fn as_bool(&self) -> Option<bool> {
|
||||
match self { JsonValue::Bool(b) => Some(*b), _ => None }
|
||||
}
|
||||
pub fn get(&self, key: &str) -> Option<&JsonValue> {
|
||||
self.as_object()?.iter().find(|(k, _)| k == key).map(|(_, v)| v)
|
||||
}
|
||||
pub fn index(&self, i: usize) -> Option<&JsonValue> {
|
||||
self.as_array()?.get(i)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_json(input: &str) -> Result<JsonValue, String> {
|
||||
let mut parser = JsonParser::new(input);
|
||||
let val = parser.parse_value()?;
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
struct JsonParser<'a> {
|
||||
input: &'a [u8],
|
||||
pos: usize,
|
||||
}
|
||||
|
||||
impl<'a> JsonParser<'a> {
|
||||
fn new(input: &'a str) -> Self {
|
||||
Self { input: input.as_bytes(), pos: 0 }
|
||||
}
|
||||
|
||||
fn skip_whitespace(&mut self) {
|
||||
while self.pos < self.input.len() {
|
||||
match self.input[self.pos] {
|
||||
b' ' | b'\t' | b'\n' | b'\r' => self.pos += 1,
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn peek(&self) -> Option<u8> {
|
||||
self.input.get(self.pos).copied()
|
||||
}
|
||||
|
||||
fn advance(&mut self) -> Result<u8, String> {
|
||||
if self.pos >= self.input.len() {
|
||||
return Err("Unexpected end of JSON".into());
|
||||
}
|
||||
let b = self.input[self.pos];
|
||||
self.pos += 1;
|
||||
Ok(b)
|
||||
}
|
||||
|
||||
fn expect(&mut self, ch: u8) -> Result<(), String> {
|
||||
let b = self.advance()?;
|
||||
if b != ch {
|
||||
return Err(format!("Expected '{}', got '{}'", ch as char, b as char));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_value(&mut self) -> Result<JsonValue, String> {
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b'"') => self.parse_string().map(JsonValue::String),
|
||||
Some(b'{') => self.parse_object(),
|
||||
Some(b'[') => self.parse_array(),
|
||||
Some(b't') => self.parse_literal("true", JsonValue::Bool(true)),
|
||||
Some(b'f') => self.parse_literal("false", JsonValue::Bool(false)),
|
||||
Some(b'n') => self.parse_literal("null", JsonValue::Null),
|
||||
Some(b'-') | Some(b'0'..=b'9') => self.parse_number(),
|
||||
Some(ch) => Err(format!("Unexpected character: '{}'", ch as char)),
|
||||
None => Err("Unexpected end of JSON".into()),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_string(&mut self) -> Result<String, String> {
|
||||
self.expect(b'"')?;
|
||||
let mut s = String::new();
|
||||
loop {
|
||||
let b = self.advance()?;
|
||||
match b {
|
||||
b'"' => return Ok(s),
|
||||
b'\\' => {
|
||||
let esc = self.advance()?;
|
||||
match esc {
|
||||
b'"' => s.push('"'),
|
||||
b'\\' => s.push('\\'),
|
||||
b'/' => s.push('/'),
|
||||
b'b' => s.push('\u{08}'),
|
||||
b'f' => s.push('\u{0C}'),
|
||||
b'n' => s.push('\n'),
|
||||
b'r' => s.push('\r'),
|
||||
b't' => s.push('\t'),
|
||||
b'u' => {
|
||||
let mut hex = String::new();
|
||||
for _ in 0..4 {
|
||||
hex.push(self.advance()? as char);
|
||||
}
|
||||
let code = u32::from_str_radix(&hex, 16)
|
||||
.map_err(|_| format!("Invalid unicode escape: {}", hex))?;
|
||||
if let Some(ch) = char::from_u32(code) {
|
||||
s.push(ch);
|
||||
}
|
||||
}
|
||||
_ => return Err(format!("Invalid escape: \\{}", esc as char)),
|
||||
}
|
||||
}
|
||||
_ => s.push(b as char),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_number(&mut self) -> Result<JsonValue, String> {
|
||||
let start = self.pos;
|
||||
if self.peek() == Some(b'-') { self.pos += 1; }
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
if self.pos < self.input.len() && self.input[self.pos] == b'.' {
|
||||
self.pos += 1;
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
}
|
||||
if self.pos < self.input.len() && (self.input[self.pos] == b'e' || self.input[self.pos] == b'E') {
|
||||
self.pos += 1;
|
||||
if self.pos < self.input.len() && (self.input[self.pos] == b'+' || self.input[self.pos] == b'-') {
|
||||
self.pos += 1;
|
||||
}
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
}
|
||||
let s = std::str::from_utf8(&self.input[start..self.pos])
|
||||
.map_err(|_| "Invalid UTF-8 in number".to_string())?;
|
||||
let n: f64 = s.parse().map_err(|_| format!("Invalid number: {}", s))?;
|
||||
Ok(JsonValue::Number(n))
|
||||
}
|
||||
|
||||
fn parse_object(&mut self) -> Result<JsonValue, String> {
|
||||
self.expect(b'{')?;
|
||||
self.skip_whitespace();
|
||||
let mut pairs = Vec::new();
|
||||
if self.peek() == Some(b'}') {
|
||||
self.pos += 1;
|
||||
return Ok(JsonValue::Object(pairs));
|
||||
}
|
||||
loop {
|
||||
self.skip_whitespace();
|
||||
let key = self.parse_string()?;
|
||||
self.skip_whitespace();
|
||||
self.expect(b':')?;
|
||||
let val = self.parse_value()?;
|
||||
pairs.push((key, val));
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b',') => { self.pos += 1; }
|
||||
Some(b'}') => { self.pos += 1; return Ok(JsonValue::Object(pairs)); }
|
||||
_ => return Err("Expected ',' or '}' in object".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_array(&mut self) -> Result<JsonValue, String> {
|
||||
self.expect(b'[')?;
|
||||
self.skip_whitespace();
|
||||
let mut items = Vec::new();
|
||||
if self.peek() == Some(b']') {
|
||||
self.pos += 1;
|
||||
return Ok(JsonValue::Array(items));
|
||||
}
|
||||
loop {
|
||||
let val = self.parse_value()?;
|
||||
items.push(val);
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b',') => { self.pos += 1; }
|
||||
Some(b']') => { self.pos += 1; return Ok(JsonValue::Array(items)); }
|
||||
_ => return Err("Expected ',' or ']' in array".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_literal(&mut self, expected: &str, value: JsonValue) -> Result<JsonValue, String> {
|
||||
for &b in expected.as_bytes() {
|
||||
let actual = self.advance()?;
|
||||
if actual != b {
|
||||
return Err(format!("Expected '{}', got '{}'", b as char, actual as char));
|
||||
}
|
||||
}
|
||||
Ok(value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_null() {
|
||||
assert_eq!(parse_json("null").unwrap(), JsonValue::Null);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bool() {
|
||||
assert_eq!(parse_json("true").unwrap(), JsonValue::Bool(true));
|
||||
assert_eq!(parse_json("false").unwrap(), JsonValue::Bool(false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_number() {
|
||||
match parse_json("42").unwrap() {
|
||||
JsonValue::Number(n) => assert!((n - 42.0).abs() < 1e-10),
|
||||
other => panic!("Expected Number, got {:?}", other),
|
||||
}
|
||||
match parse_json("-3.14").unwrap() {
|
||||
JsonValue::Number(n) => assert!((n - (-3.14)).abs() < 1e-10),
|
||||
other => panic!("Expected Number, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_string() {
|
||||
assert_eq!(parse_json("\"hello\"").unwrap(), JsonValue::String("hello".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_string_escapes() {
|
||||
assert_eq!(
|
||||
parse_json(r#""hello\nworld""#).unwrap(),
|
||||
JsonValue::String("hello\nworld".into())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_array() {
|
||||
let val = parse_json("[1, 2, 3]").unwrap();
|
||||
match val {
|
||||
JsonValue::Array(arr) => assert_eq!(arr.len(), 3),
|
||||
other => panic!("Expected Array, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_object() {
|
||||
let val = parse_json(r#"{"name": "test", "value": 42}"#).unwrap();
|
||||
match val {
|
||||
JsonValue::Object(map) => {
|
||||
assert_eq!(map.len(), 2);
|
||||
assert_eq!(map[0].0, "name");
|
||||
}
|
||||
other => panic!("Expected Object, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_nested() {
|
||||
let json = r#"{"meshes": [{"name": "Cube", "primitives": [{"attributes": {"POSITION": 0}}]}]}"#;
|
||||
let val = parse_json(json).unwrap();
|
||||
assert!(matches!(val, JsonValue::Object(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_empty_array() {
|
||||
assert_eq!(parse_json("[]").unwrap(), JsonValue::Array(vec![]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_empty_object() {
|
||||
assert_eq!(parse_json("{}").unwrap(), JsonValue::Object(vec![]));
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
pub mod json_parser;
|
||||
pub mod gltf;
|
||||
pub mod deflate;
|
||||
pub mod png;
|
||||
pub mod jpg;
|
||||
pub mod gpu;
|
||||
pub mod light;
|
||||
pub mod obj;
|
||||
@@ -13,8 +16,13 @@ pub mod sphere;
|
||||
pub mod pbr_pipeline;
|
||||
pub mod shadow;
|
||||
pub mod shadow_pipeline;
|
||||
pub mod csm;
|
||||
pub mod point_shadow;
|
||||
pub mod spot_shadow;
|
||||
pub mod frustum;
|
||||
pub mod brdf_lut;
|
||||
pub mod ibl;
|
||||
pub mod sh;
|
||||
pub mod gbuffer;
|
||||
pub mod fullscreen_quad;
|
||||
pub mod deferred_pipeline;
|
||||
@@ -29,13 +37,18 @@ pub use gpu::{GpuContext, DEPTH_FORMAT};
|
||||
pub use light::{CameraUniform, LightUniform, LightData, LightsUniform, MAX_LIGHTS, LIGHT_DIRECTIONAL, LIGHT_POINT, LIGHT_SPOT};
|
||||
pub use mesh::Mesh;
|
||||
pub use camera::{Camera, FpsController};
|
||||
pub use texture::{GpuTexture, pbr_texture_bind_group_layout, create_pbr_texture_bind_group};
|
||||
pub use texture::{GpuTexture, pbr_texture_bind_group_layout, create_pbr_texture_bind_group, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group};
|
||||
pub use material::MaterialUniform;
|
||||
pub use sphere::generate_sphere;
|
||||
pub use pbr_pipeline::create_pbr_pipeline;
|
||||
pub use shadow::{ShadowMap, ShadowUniform, ShadowPassUniform, SHADOW_MAP_SIZE, SHADOW_FORMAT};
|
||||
pub use shadow_pipeline::{create_shadow_pipeline, shadow_pass_bind_group_layout};
|
||||
pub use csm::{CascadedShadowMap, CsmUniform, compute_cascade_matrices, CSM_CASCADE_COUNT, CSM_MAP_SIZE, CSM_FORMAT};
|
||||
pub use point_shadow::{PointShadowMap, point_shadow_view_matrices, point_shadow_projection, POINT_SHADOW_SIZE, POINT_SHADOW_FORMAT};
|
||||
pub use spot_shadow::{SpotShadowMap, spot_shadow_matrix};
|
||||
pub use frustum::{Plane, Frustum, extract_frustum, sphere_vs_frustum, cull_lights};
|
||||
pub use ibl::IblResources;
|
||||
pub use sh::{compute_sh_coefficients, pack_sh_coefficients, evaluate_sh_cpu};
|
||||
pub use gbuffer::GBuffer;
|
||||
pub use fullscreen_quad::{create_fullscreen_vertex_buffer, FullscreenVertex};
|
||||
pub use deferred_pipeline::{
|
||||
@@ -54,3 +67,5 @@ pub use hdr::{HdrTarget, HDR_FORMAT};
|
||||
pub use bloom::{BloomResources, BloomUniform, mip_sizes, BLOOM_MIP_COUNT};
|
||||
pub use tonemap::{TonemapUniform, aces_tonemap};
|
||||
pub use png::parse_png;
|
||||
pub use jpg::parse_jpg;
|
||||
pub use gltf::{parse_gltf, GltfData, GltfMesh, GltfMaterial};
|
||||
|
||||
@@ -36,6 +36,10 @@ struct MaterialUniform {
|
||||
@group(1) @binding(1) var s_diffuse: sampler;
|
||||
@group(1) @binding(2) var t_normal: texture_2d<f32>;
|
||||
@group(1) @binding(3) var s_normal: sampler;
|
||||
@group(1) @binding(4) var t_orm: texture_2d<f32>;
|
||||
@group(1) @binding(5) var s_orm: sampler;
|
||||
@group(1) @binding(6) var t_emissive: texture_2d<f32>;
|
||||
@group(1) @binding(7) var s_emissive: sampler;
|
||||
|
||||
@group(2) @binding(0) var<uniform> material: MaterialUniform;
|
||||
|
||||
@@ -43,6 +47,10 @@ struct ShadowUniform {
|
||||
light_view_proj: mat4x4<f32>,
|
||||
shadow_map_size: f32,
|
||||
shadow_bias: f32,
|
||||
_padding: vec2<f32>,
|
||||
sun_direction: vec3<f32>,
|
||||
turbidity: f32,
|
||||
sh_coefficients: array<vec4<f32>, 7>,
|
||||
};
|
||||
|
||||
@group(3) @binding(0) var t_shadow: texture_depth_2d;
|
||||
@@ -229,30 +237,93 @@ fn calculate_shadow(light_space_pos: vec4<f32>) -> f32 {
|
||||
return shadow_val / 9.0;
|
||||
}
|
||||
|
||||
// Procedural environment sampling for IBL
|
||||
// Hosek-Wilkie inspired procedural sky model
|
||||
fn sample_environment(direction: vec3<f32>, roughness: f32) -> vec3<f32> {
|
||||
let t = direction.y * 0.5 + 0.5;
|
||||
let sun_dir = normalize(shadow.sun_direction);
|
||||
let turb = clamp(shadow.turbidity, 1.5, 10.0);
|
||||
|
||||
var env: vec3<f32>;
|
||||
if direction.y > 0.0 {
|
||||
let horizon = vec3<f32>(0.6, 0.6, 0.5);
|
||||
let sky = vec3<f32>(0.3, 0.5, 0.9);
|
||||
env = mix(horizon, sky, pow(direction.y, 0.4));
|
||||
// Rayleigh scattering: blue zenith, warm horizon
|
||||
let zenith_color = vec3<f32>(0.15, 0.3, 0.8) * (1.0 / (turb * 0.15 + 0.5));
|
||||
let horizon_color = vec3<f32>(0.7, 0.6, 0.5) * (1.0 + turb * 0.04);
|
||||
|
||||
let elevation = direction.y;
|
||||
let sky_gradient = mix(horizon_color, zenith_color, pow(elevation, 0.4));
|
||||
|
||||
// Mie scattering: haze near sun direction
|
||||
let cos_sun = max(dot(direction, sun_dir), 0.0);
|
||||
let mie_strength = turb * 0.02;
|
||||
let mie = mie_strength * pow(cos_sun, 8.0) * vec3<f32>(1.0, 0.9, 0.7);
|
||||
|
||||
// Sun disk: bright spot with falloff
|
||||
let sun_disk = pow(max(cos_sun, 0.0), 2048.0) * vec3<f32>(10.0, 9.0, 7.0);
|
||||
|
||||
// Combine
|
||||
env = sky_gradient + mie + sun_disk;
|
||||
} else {
|
||||
let horizon = vec3<f32>(0.6, 0.6, 0.5);
|
||||
let ground = vec3<f32>(0.1, 0.08, 0.06);
|
||||
env = mix(horizon, ground, pow(-direction.y, 0.4));
|
||||
// Ground: dark, warm
|
||||
let horizon_color = vec3<f32>(0.6, 0.55, 0.45);
|
||||
let ground_color = vec3<f32>(0.1, 0.08, 0.06);
|
||||
env = mix(horizon_color, ground_color, pow(-direction.y, 0.4));
|
||||
}
|
||||
|
||||
// Roughness blur: blend toward average for rough surfaces
|
||||
let avg = vec3<f32>(0.3, 0.35, 0.4);
|
||||
return mix(env, avg, roughness * roughness);
|
||||
}
|
||||
|
||||
// Evaluate L2 Spherical Harmonics at given normal direction
|
||||
// 9 SH coefficients (RGB) packed into 7 vec4s
|
||||
fn evaluate_sh(normal: vec3<f32>, coeffs: array<vec4<f32>, 7>) -> vec3<f32> {
|
||||
let x = normal.x;
|
||||
let y = normal.y;
|
||||
let z = normal.z;
|
||||
|
||||
// SH basis functions (real, L2 order)
|
||||
let Y00 = 0.282095; // L=0, M=0
|
||||
let Y1n1 = 0.488603 * y; // L=1, M=-1
|
||||
let Y10 = 0.488603 * z; // L=1, M=0
|
||||
let Y1p1 = 0.488603 * x; // L=1, M=1
|
||||
let Y2n2 = 1.092548 * x * y; // L=2, M=-2
|
||||
let Y2n1 = 1.092548 * y * z; // L=2, M=-1
|
||||
let Y20 = 0.315392 * (3.0 * z * z - 1.0); // L=2, M=0
|
||||
let Y2p1 = 1.092548 * x * z; // L=2, M=1
|
||||
let Y2p2 = 0.546274 * (x * x - y * y); // L=2, M=2
|
||||
|
||||
// Unpack: coeffs[0].xyz = c0_rgb, coeffs[0].w = c1_r,
|
||||
// coeffs[1].xyz = c1_gb + c2_r, coeffs[1].w = c2_g, etc.
|
||||
// Packing: 9 coeffs * 3 channels = 27 floats -> 7 vec4s (28 floats, last padded)
|
||||
let c0 = vec3<f32>(coeffs[0].x, coeffs[0].y, coeffs[0].z);
|
||||
let c1 = vec3<f32>(coeffs[0].w, coeffs[1].x, coeffs[1].y);
|
||||
let c2 = vec3<f32>(coeffs[1].z, coeffs[1].w, coeffs[2].x);
|
||||
let c3 = vec3<f32>(coeffs[2].y, coeffs[2].z, coeffs[2].w);
|
||||
let c4 = vec3<f32>(coeffs[3].x, coeffs[3].y, coeffs[3].z);
|
||||
let c5 = vec3<f32>(coeffs[3].w, coeffs[4].x, coeffs[4].y);
|
||||
let c6 = vec3<f32>(coeffs[4].z, coeffs[4].w, coeffs[5].x);
|
||||
let c7 = vec3<f32>(coeffs[5].y, coeffs[5].z, coeffs[5].w);
|
||||
let c8 = vec3<f32>(coeffs[6].x, coeffs[6].y, coeffs[6].z);
|
||||
|
||||
return max(
|
||||
c0 * Y00 + c1 * Y1n1 + c2 * Y10 + c3 * Y1p1 +
|
||||
c4 * Y2n2 + c5 * Y2n1 + c6 * Y20 + c7 * Y2p1 + c8 * Y2p2,
|
||||
vec3<f32>(0.0)
|
||||
);
|
||||
}
|
||||
|
||||
@fragment
|
||||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
let tex_color = textureSample(t_diffuse, s_diffuse, in.uv);
|
||||
let albedo = material.base_color.rgb * tex_color.rgb;
|
||||
let metallic = material.metallic;
|
||||
let roughness = material.roughness;
|
||||
let ao = material.ao;
|
||||
|
||||
// Sample ORM texture: R=AO, G=Roughness, B=Metallic; multiply with material params
|
||||
let orm_sample = textureSample(t_orm, s_orm, in.uv);
|
||||
let ao = orm_sample.r * material.ao;
|
||||
let roughness = orm_sample.g * material.roughness;
|
||||
let metallic = orm_sample.b * material.metallic;
|
||||
|
||||
// Sample emissive texture
|
||||
let emissive = textureSample(t_emissive, s_emissive, in.uv).rgb;
|
||||
|
||||
// Normal mapping via TBN matrix
|
||||
let T = normalize(in.world_tangent);
|
||||
@@ -291,8 +362,14 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
let NdotV_ibl = max(dot(N, V), 0.0);
|
||||
let R = reflect(-V, N);
|
||||
|
||||
// Diffuse IBL
|
||||
let irradiance = sample_environment(N, 1.0);
|
||||
// Diffuse IBL: use SH irradiance if SH coefficients are set, else fallback to procedural
|
||||
var irradiance: vec3<f32>;
|
||||
let sh_test = shadow.sh_coefficients[0].x + shadow.sh_coefficients[0].y + shadow.sh_coefficients[0].z;
|
||||
if abs(sh_test) > 0.0001 {
|
||||
irradiance = evaluate_sh(N, shadow.sh_coefficients);
|
||||
} else {
|
||||
irradiance = sample_environment(N, 1.0);
|
||||
}
|
||||
let F_env = fresnel_schlick(NdotV_ibl, F0);
|
||||
let kd_ibl = (vec3<f32>(1.0) - F_env) * (1.0 - metallic);
|
||||
let diffuse_ibl = kd_ibl * albedo * irradiance;
|
||||
@@ -304,7 +381,7 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
|
||||
let ambient = (diffuse_ibl + specular_ibl) * ao;
|
||||
|
||||
var color = ambient + Lo;
|
||||
var color = ambient + Lo + emissive;
|
||||
|
||||
// Reinhard tone mapping
|
||||
color = color / (color + vec3<f32>(1.0));
|
||||
|
||||
178
crates/voltex_renderer/src/point_shadow.rs
Normal file
178
crates/voltex_renderer/src/point_shadow.rs
Normal file
@@ -0,0 +1,178 @@
|
||||
use voltex_math::{Mat4, Vec3};
|
||||
|
||||
pub const POINT_SHADOW_SIZE: u32 = 512;
|
||||
pub const POINT_SHADOW_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
|
||||
|
||||
/// Depth cube map for omnidirectional point light shadows.
|
||||
///
|
||||
/// Uses a single cube texture with 6 faces (512x512 each).
|
||||
pub struct PointShadowMap {
|
||||
pub texture: wgpu::Texture,
|
||||
pub view: wgpu::TextureView,
|
||||
/// Per-face views for rendering into each cube face.
|
||||
pub face_views: [wgpu::TextureView; 6],
|
||||
pub sampler: wgpu::Sampler,
|
||||
}
|
||||
|
||||
impl PointShadowMap {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("Point Shadow Cube Texture"),
|
||||
size: wgpu::Extent3d {
|
||||
width: POINT_SHADOW_SIZE,
|
||||
height: POINT_SHADOW_SIZE,
|
||||
depth_or_array_layers: 6,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: POINT_SHADOW_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
|
||||
view_formats: &[],
|
||||
});
|
||||
|
||||
// Full cube view for sampling in shader
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
label: Some("Point Shadow Cube View"),
|
||||
dimension: Some(wgpu::TextureViewDimension::Cube),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Per-face views for rendering
|
||||
let face_labels = ["+X", "-X", "+Y", "-Y", "+Z", "-Z"];
|
||||
let face_views = std::array::from_fn(|i| {
|
||||
texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
label: Some(&format!("Point Shadow Face {}", face_labels[i])),
|
||||
dimension: Some(wgpu::TextureViewDimension::D2),
|
||||
base_array_layer: i as u32,
|
||||
array_layer_count: Some(1),
|
||||
..Default::default()
|
||||
})
|
||||
});
|
||||
|
||||
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
|
||||
label: Some("Point Shadow Sampler"),
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
mipmap_filter: wgpu::MipmapFilterMode::Nearest,
|
||||
compare: Some(wgpu::CompareFunction::LessEqual),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
Self {
|
||||
texture,
|
||||
view,
|
||||
face_views,
|
||||
sampler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the 6 view matrices for rendering into a point light shadow cube map.
|
||||
///
|
||||
/// Order: +X, -X, +Y, -Y, +Z, -Z (matching wgpu cube face order).
|
||||
///
|
||||
/// Each matrix is a look-at view matrix from `light_pos` toward the
|
||||
/// corresponding axis direction, with an appropriate up vector.
|
||||
pub fn point_shadow_view_matrices(light_pos: Vec3) -> [Mat4; 6] {
|
||||
[
|
||||
// +X: look right
|
||||
Mat4::look_at(light_pos, light_pos + Vec3::X, -Vec3::Y),
|
||||
// -X: look left
|
||||
Mat4::look_at(light_pos, light_pos - Vec3::X, -Vec3::Y),
|
||||
// +Y: look up
|
||||
Mat4::look_at(light_pos, light_pos + Vec3::Y, Vec3::Z),
|
||||
// -Y: look down
|
||||
Mat4::look_at(light_pos, light_pos - Vec3::Y, -Vec3::Z),
|
||||
// +Z: look forward
|
||||
Mat4::look_at(light_pos, light_pos + Vec3::Z, -Vec3::Y),
|
||||
// -Z: look backward
|
||||
Mat4::look_at(light_pos, light_pos - Vec3::Z, -Vec3::Y),
|
||||
]
|
||||
}
|
||||
|
||||
/// Compute the perspective projection for point light shadow rendering.
|
||||
///
|
||||
/// 90 degree FOV, 1:1 aspect ratio.
|
||||
/// `near` and `far` control the shadow range (typically 0.1 and light.range).
|
||||
pub fn point_shadow_projection(near: f32, far: f32) -> Mat4 {
|
||||
Mat4::perspective(std::f32::consts::FRAC_PI_2, 1.0, near, far)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_math::Vec4;

    /// Loose float comparison for projection/view round-trips.
    fn approx_eq(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-4
    }

    #[test]
    fn test_point_shadow_view_matrices_count() {
        // One view matrix per cube-map face (+X, -X, +Y, -Y, +Z, -Z).
        let views = point_shadow_view_matrices(Vec3::ZERO);
        assert_eq!(views.len(), 6);
    }

    #[test]
    fn test_point_shadow_view_directions() {
        let pos = Vec3::new(0.0, 0.0, 0.0);
        let views = point_shadow_view_matrices(pos);

        // For the +X face, a point at (1, 0, 0) should map to the center of the view
        // (i.e., to (0, 0, -1) in view space, roughly).
        let test_point = Vec4::new(1.0, 0.0, 0.0, 1.0);
        let in_view = views[0].mul_vec4(test_point);
        // z should be negative (in front of camera)
        assert!(in_view.z < 0.0, "+X face: point at +X should be in front, got z={}", in_view.z);
        // x and y should be near 0 (centered)
        assert!(approx_eq(in_view.x, 0.0), "+X face: expected x~0, got {}", in_view.x);
        assert!(approx_eq(in_view.y, 0.0), "+X face: expected y~0, got {}", in_view.y);

        // For the -X face, a point at (-1, 0, 0) should be in front
        let test_neg_x = Vec4::new(-1.0, 0.0, 0.0, 1.0);
        let in_view_neg = views[1].mul_vec4(test_neg_x);
        assert!(in_view_neg.z < 0.0, "-X face: point at -X should be in front, got z={}", in_view_neg.z);
    }

    #[test]
    fn test_point_shadow_view_offset_position() {
        // The view matrices must be centered on the light, not the world origin.
        let pos = Vec3::new(5.0, 10.0, -3.0);
        let views = point_shadow_view_matrices(pos);

        // Origin of the light should map to (0,0,0) in view space
        let origin = Vec4::from_vec3(pos, 1.0);
        for (i, view) in views.iter().enumerate() {
            let v = view.mul_vec4(origin);
            assert!(approx_eq(v.x, 0.0), "Face {}: origin x should be 0, got {}", i, v.x);
            assert!(approx_eq(v.y, 0.0), "Face {}: origin y should be 0, got {}", i, v.y);
            assert!(approx_eq(v.z, 0.0), "Face {}: origin z should be 0, got {}", i, v.z);
        }
    }

    #[test]
    fn test_point_shadow_projection_90fov() {
        let proj = point_shadow_projection(0.1, 100.0);

        // At 90 degree FOV with aspect 1:1, a point at (-near, 0, -near) in view space
        // should project to the edge of the viewport.
        let near = 0.1f32;
        let edge = Vec4::new(near, 0.0, -near, 1.0);
        let clip = proj.mul_vec4(edge);
        let ndc_x = clip.x / clip.w;
        // Should be at x=1.0 (right edge)
        assert!(approx_eq(ndc_x, 1.0), "Expected NDC x=1.0, got {}", ndc_x);
    }

    #[test]
    fn test_point_shadow_projection_near_plane() {
        // Depth convention check: near plane maps to NDC z = 0 (wgpu/D3D-style range).
        let proj = point_shadow_projection(0.1, 100.0);
        let p = Vec4::new(0.0, 0.0, -0.1, 1.0);
        let clip = proj.mul_vec4(p);
        let ndc_z = clip.z / clip.w;
        assert!(approx_eq(ndc_z, 0.0), "Near plane should map to NDC z=0, got {}", ndc_z);
    }
}
|
||||
375
crates/voltex_renderer/src/sh.rs
Normal file
375
crates/voltex_renderer/src/sh.rs
Normal file
@@ -0,0 +1,375 @@
|
||||
//! L2 Spherical Harmonics computation for procedural sky irradiance.
|
||||
//!
|
||||
//! Computes 9 SH coefficients (order 2) for 3 channels (RGB) from a procedural sky model.
|
||||
|
||||
use std::f32::consts::PI;
|
||||
|
||||
/// Real SH basis functions for L=0,1,2 evaluated at direction (x, y, z).
|
||||
/// Returns array of 9 basis values.
|
||||
fn sh_basis(x: f32, y: f32, z: f32) -> [f32; 9] {
|
||||
[
|
||||
0.282095, // Y00: L=0 M=0
|
||||
0.488603 * y, // Y1-1: L=1 M=-1
|
||||
0.488603 * z, // Y10: L=1 M=0
|
||||
0.488603 * x, // Y1+1: L=1 M=+1
|
||||
1.092548 * x * y, // Y2-2: L=2 M=-2
|
||||
1.092548 * y * z, // Y2-1: L=2 M=-1
|
||||
0.315392 * (3.0 * z * z - 1.0), // Y20: L=2 M=0
|
||||
1.092548 * x * z, // Y2+1: L=2 M=+1
|
||||
0.546274 * (x * x - y * y), // Y2+2: L=2 M=+2
|
||||
]
|
||||
}
|
||||
|
||||
/// Hosek-Wilkie inspired procedural sky evaluation.
/// Returns RGB radiance for a given direction.
fn procedural_sky(dir: [f32; 3], sun_dir: [f32; 3], turbidity: f32) -> [f32; 3] {
    let turb = turbidity.clamp(1.5, 10.0);

    // Below (or exactly at) the horizon: fade from a warm horizon tint to dark ground.
    if dir[1] <= 0.0 {
        let fade = (-dir[1]).powf(0.4);
        let horizon = [0.6f32, 0.55, 0.45];
        let ground = [0.1f32, 0.08, 0.06];
        let mut out = [0.0f32; 3];
        for c in 0..3 {
            out[c] = horizon[c] + (ground[c] - horizon[c]) * fade;
        }
        return out;
    }

    // Rayleigh scattering: blue zenith, warm horizon; turbidity dims the zenith.
    let zenith = [
        0.15 / (turb * 0.15 + 0.5),
        0.3 / (turb * 0.15 + 0.5),
        0.8 / (turb * 0.15 + 0.5),
    ];
    let horizon = [
        0.7 * (1.0 + turb * 0.04),
        0.6 * (1.0 + turb * 0.04),
        0.5 * (1.0 + turb * 0.04),
    ];

    // Blend horizon -> zenith with a power curve on elevation.
    let blend = dir[1].powf(0.4);

    // Mie scattering halo around the sun, plus a very tight sun disk.
    let cos_sun = (dir[0] * sun_dir[0] + dir[1] * sun_dir[1] + dir[2] * sun_dir[2]).max(0.0);
    let mie_factor = (turb * 0.02) * cos_sun.powi(8);
    let sun_factor = cos_sun.powi(2048);

    // Per-channel tints for the halo and the disk (warm-weighted).
    let mie_tint = [1.0f32, 0.9, 0.7];
    let sun_tint = [10.0f32, 9.0, 7.0];

    let mut out = [0.0f32; 3];
    for c in 0..3 {
        let sky = horizon[c] + (zenith[c] - horizon[c]) * blend;
        out[c] = sky + mie_factor * mie_tint[c] + sun_factor * sun_tint[c];
    }
    out
}
|
||||
|
||||
/// Normalize a 3-component vector. Near-zero-length inputs fall back to +Y
/// ("up") instead of producing NaNs.
fn normalize_vec3(v: [f32; 3]) -> [f32; 3] {
    let len = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
    if len < 1e-8 {
        // Degenerate direction: return a safe default.
        [0.0, 1.0, 0.0]
    } else {
        [v[0] / len, v[1] / len, v[2] / len]
    }
}
|
||||
|
||||
/// Compute L2 (order 2) SH coefficients from the procedural sky model.
|
||||
///
|
||||
/// Samples the environment at `num_samples` directions distributed over the full sphere
|
||||
/// and accumulates SH basis function values weighted by environment radiance.
|
||||
///
|
||||
/// Returns 9 RGB coefficient triplets.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `sun_dir` - Normalized sun direction vector
|
||||
/// * `sun_color` - Sun color (unused in current procedural model, kept for API compatibility)
|
||||
/// * `turbidity` - Atmospheric turbidity (1.5 to 10.0)
|
||||
pub fn compute_sh_coefficients(
|
||||
sun_dir: [f32; 3],
|
||||
_sun_color: [f32; 3],
|
||||
turbidity: f32,
|
||||
) -> [[f32; 3]; 9] {
|
||||
compute_sh_coefficients_with_samples(sun_dir, _sun_color, turbidity, 128)
|
||||
}
|
||||
|
||||
/// Same as `compute_sh_coefficients` but with configurable sample count per axis.
|
||||
/// Total samples = `samples_per_axis * samples_per_axis`.
|
||||
pub fn compute_sh_coefficients_with_samples(
|
||||
sun_dir: [f32; 3],
|
||||
_sun_color: [f32; 3],
|
||||
turbidity: f32,
|
||||
samples_per_axis: u32,
|
||||
) -> [[f32; 3]; 9] {
|
||||
let sun_dir = normalize_vec3(sun_dir);
|
||||
let n = samples_per_axis;
|
||||
let total = n * n;
|
||||
|
||||
let mut coeffs = [[0.0f32; 3]; 9];
|
||||
|
||||
// Sample uniformly on the sphere using spherical coordinates
|
||||
for i in 0..n {
|
||||
let theta = PI * (i as f32 + 0.5) / n as f32; // [0, pi]
|
||||
let sin_theta = theta.sin();
|
||||
let cos_theta = theta.cos();
|
||||
|
||||
for j in 0..n {
|
||||
let phi = 2.0 * PI * (j as f32 + 0.5) / n as f32; // [0, 2pi]
|
||||
|
||||
let x = sin_theta * phi.cos();
|
||||
let y = cos_theta; // y-up convention
|
||||
let z = sin_theta * phi.sin();
|
||||
|
||||
let radiance = procedural_sky([x, y, z], sun_dir, turbidity);
|
||||
let basis = sh_basis(x, y, z);
|
||||
|
||||
// Monte Carlo weight: sphere area = 4*pi, uniform PDF = 1/(4*pi)
|
||||
// weight = radiance * basis * sin_theta * (pi/n) * (2*pi/n) / (1/(4*pi))
|
||||
// But for uniform sphere sampling with stratified grid:
|
||||
// weight = (4*pi / total) * radiance * basis
|
||||
// sin_theta is already accounted for by the area element
|
||||
let weight = 4.0 * PI * sin_theta / total as f32;
|
||||
// Actually the correct formula for stratified spherical integration:
|
||||
// dA = sin(theta) * dtheta * dphi
|
||||
// dtheta = pi/n, dphi = 2*pi/n
|
||||
// weight = sin(theta) * (pi/n) * (2*pi/n)
|
||||
let _correct_weight = sin_theta * (PI / n as f32) * (2.0 * PI / n as f32);
|
||||
|
||||
for k in 0..9 {
|
||||
let w = _correct_weight * basis[k];
|
||||
coeffs[k][0] += w * radiance[0];
|
||||
coeffs[k][1] += w * radiance[1];
|
||||
coeffs[k][2] += w * radiance[2];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coeffs
|
||||
}
|
||||
|
||||
/// Pack 9 RGB SH coefficients into 7 vec4s (28 floats) for GPU uniform buffer.
/// Layout: coefficients are stored sequentially as [c0.r, c0.g, c0.b, c1.r, c1.g, c1.b, ...]
/// packed into vec4s; the 28th float is zero padding.
pub fn pack_sh_coefficients(coeffs: &[[f32; 3]; 9]) -> [[f32; 4]; 7] {
    // Flatten the 9*3 = 27 channel values; slot 27 stays 0.0 as padding.
    let mut flat = [0.0f32; 28];
    for (slot, value) in coeffs.iter().flatten().enumerate() {
        flat[slot] = *value;
    }

    // Regroup the flat buffer into 7 vec4s.
    std::array::from_fn(|v| std::array::from_fn(|c| flat[v * 4 + c]))
}
|
||||
|
||||
/// Evaluate SH at a given normal direction (CPU-side, for testing).
|
||||
pub fn evaluate_sh_cpu(normal: [f32; 3], coeffs: &[[f32; 3]; 9]) -> [f32; 3] {
|
||||
let basis = sh_basis(normal[0], normal[1], normal[2]);
|
||||
let mut result = [0.0f32; 3];
|
||||
for k in 0..9 {
|
||||
result[0] += coeffs[k][0] * basis[k];
|
||||
result[1] += coeffs[k][1] * basis[k];
|
||||
result[2] += coeffs[k][2] * basis[k];
|
||||
}
|
||||
// Clamp to non-negative
|
||||
result[0] = result[0].max(0.0);
|
||||
result[1] = result[1].max(0.0);
|
||||
result[2] = result[2].max(0.0);
|
||||
result
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// For a uniform environment L(d) = 1, projecting onto SH gives
    /// c_00 = integral(Y00 dw) = 0.282095 * 4*pi ≈ 3.5449, and every
    /// higher-order coefficient integrates to ~0 by orthogonality.
    #[test]
    fn test_sh_uniform_white_environment() {
        // Integrate the basis against constant white radiance directly, using
        // the same stratified sampling scheme as the production code.
        let n = 128u32;
        let mut coeffs = [[0.0f32; 3]; 9];

        for i in 0..n {
            let theta = PI * (i as f32 + 0.5) / n as f32;
            let sin_theta = theta.sin();
            let cos_theta = theta.cos();

            for j in 0..n {
                let phi = 2.0 * PI * (j as f32 + 0.5) / n as f32;
                let x = sin_theta * phi.cos();
                let y = cos_theta;
                let z = sin_theta * phi.sin();

                let radiance = [1.0f32, 1.0, 1.0]; // uniform white
                let basis = sh_basis(x, y, z);
                let weight = sin_theta * (PI / n as f32) * (2.0 * PI / n as f32);

                for k in 0..9 {
                    let w = weight * basis[k];
                    coeffs[k][0] += w * radiance[0];
                    coeffs[k][1] += w * radiance[1];
                    coeffs[k][2] += w * radiance[2];
                }
            }
        }

        // L=0 coefficient should be approximately 0.282095 * 4*pi ≈ 3.5449
        let expected_c0 = 0.282095 * 4.0 * PI;
        assert!(
            (coeffs[0][0] - expected_c0).abs() < 0.05,
            "c0.r = {} expected ~{}", coeffs[0][0], expected_c0
        );
        assert!(
            (coeffs[0][1] - expected_c0).abs() < 0.05,
            "c0.g = {} expected ~{}", coeffs[0][1], expected_c0
        );
        assert!(
            (coeffs[0][2] - expected_c0).abs() < 0.05,
            "c0.b = {} expected ~{}", coeffs[0][2], expected_c0
        );

        // All higher-order coefficients should be ~0
        for k in 1..9 {
            for ch in 0..3 {
                assert!(
                    coeffs[k][ch].abs() < 0.05,
                    "c{}[{}] = {} should be ~0", k, ch, coeffs[k][ch]
                );
            }
        }
    }

    /// For a directional light along +Y, L1 coefficient for Y (index 1) should dominate.
    #[test]
    fn test_sh_directional_light_dominant_l1() {
        // Simulate a directional "light" by using a sky that is bright only near +Y
        let n = 128u32;
        let mut coeffs = [[0.0f32; 3]; 9];

        for i in 0..n {
            let theta = PI * (i as f32 + 0.5) / n as f32;
            let sin_theta = theta.sin();
            let cos_theta = theta.cos();

            for j in 0..n {
                let phi = 2.0 * PI * (j as f32 + 0.5) / n as f32;
                let x = sin_theta * phi.cos();
                let y = cos_theta;
                let z = sin_theta * phi.sin();

                // Concentrated light near +Y direction (narrow pow(32) lobe).
                let intensity = y.max(0.0).powi(32);
                let radiance = [intensity; 3];
                let basis = sh_basis(x, y, z);
                let weight = sin_theta * (PI / n as f32) * (2.0 * PI / n as f32);

                for k in 0..9 {
                    let w = weight * basis[k];
                    coeffs[k][0] += w * radiance[0];
                    coeffs[k][1] += w * radiance[1];
                    coeffs[k][2] += w * radiance[2];
                }
            }
        }

        // L1 Y-component (index 1, which is 0.488603 * y) should be significant;
        // L0 (index 0) should also be non-zero (DC component).
        assert!(
            coeffs[0][0] > 0.01,
            "L0 coefficient should be positive for directional light: {}", coeffs[0][0]
        );
        assert!(
            coeffs[1][0] > 0.01,
            "L1(-1) Y-direction coefficient should be positive: {}", coeffs[1][0]
        );

        // The L1 Y-component should be the largest L1 component (light is along +Y)
        assert!(
            coeffs[1][0].abs() > coeffs[2][0].abs(),
            "L1(-1) Y should dominate over L1(0) Z: {} vs {}", coeffs[1][0], coeffs[2][0]
        );
        assert!(
            coeffs[1][0].abs() > coeffs[3][0].abs(),
            "L1(-1) Y should dominate over L1(+1) X: {} vs {}", coeffs[1][0], coeffs[3][0]
        );
    }

    /// Sanity checks on the full procedural pipeline: positive DC term, finite values.
    #[test]
    fn test_sh_procedural_sky_coefficients() {
        let coeffs = compute_sh_coefficients(
            [0.5, -0.7, 0.5],
            [1.0, 1.0, 1.0],
            3.0,
        );

        // L0 should be positive (sky has positive radiance)
        assert!(coeffs[0][0] > 0.0, "L0 R should be positive");
        assert!(coeffs[0][1] > 0.0, "L0 G should be positive");
        assert!(coeffs[0][2] > 0.0, "L0 B should be positive");

        // Verify coefficients are finite
        for k in 0..9 {
            for ch in 0..3 {
                assert!(
                    coeffs[k][ch].is_finite(),
                    "SH coefficient c{}[{}] = {} is not finite", k, ch, coeffs[k][ch]
                );
            }
        }
    }

    /// Verifies the tight 27-floats-plus-padding GPU layout.
    #[test]
    fn test_pack_sh_coefficients() {
        // Fill coefficients with their own flat index so the layout is self-describing.
        let mut coeffs = [[0.0f32; 3]; 9];
        for k in 0..9 {
            coeffs[k] = [(k * 3) as f32, (k * 3 + 1) as f32, (k * 3 + 2) as f32];
        }

        let packed = pack_sh_coefficients(&coeffs);

        // Verify flat layout: c0.r, c0.g, c0.b, c1.r, c1.g, c1.b, ...
        assert_eq!(packed[0], [0.0, 1.0, 2.0, 3.0]); // c0.rgb, c1.r
        assert_eq!(packed[1], [4.0, 5.0, 6.0, 7.0]); // c1.gb, c2.rg
        assert_eq!(packed[6][0], 24.0); // c8.r
        assert_eq!(packed[6][1], 25.0); // c8.g
        assert_eq!(packed[6][2], 26.0); // c8.b
        assert_eq!(packed[6][3], 0.0); // padding
    }

    /// CPU SH reconstruction must clamp to non-negative radiance everywhere.
    #[test]
    fn test_evaluate_sh_cpu_positive() {
        let coeffs = compute_sh_coefficients(
            [0.5, -0.7, 0.5],
            [1.0, 1.0, 1.0],
            3.0,
        );

        // Evaluate at several directions — should be non-negative
        let dirs = [
            [0.0, 1.0, 0.0], // up
            [0.0, -1.0, 0.0], // down
            [1.0, 0.0, 0.0], // right
            [0.0, 0.0, 1.0], // forward
        ];

        for dir in &dirs {
            let result = evaluate_sh_cpu(*dir, &coeffs);
            assert!(
                result[0] >= 0.0 && result[1] >= 0.0 && result[2] >= 0.0,
                "SH evaluation at {:?} should be non-negative: {:?}", dir, result
            );
        }
    }
}
|
||||
@@ -144,6 +144,11 @@ pub struct ShadowUniform {
|
||||
pub shadow_map_size: f32,
|
||||
pub shadow_bias: f32,
|
||||
pub _padding: [f32; 2],
|
||||
// Sky parameters (Hosek-Wilkie inspired)
|
||||
pub sun_direction: [f32; 3],
|
||||
pub turbidity: f32,
|
||||
// L2 Spherical Harmonics coefficients: 9 RGB coefficients packed into 7 vec4s (28 floats)
|
||||
pub sh_coefficients: [[f32; 4]; 7],
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
|
||||
148
crates/voltex_renderer/src/spot_shadow.rs
Normal file
148
crates/voltex_renderer/src/spot_shadow.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
use voltex_math::{Mat4, Vec3};
|
||||
use crate::shadow::{SHADOW_FORMAT, SHADOW_MAP_SIZE};
|
||||
|
||||
/// Shadow map for a single spot light. Reuses the same texture format and
/// resolution as the directional `ShadowMap`.
pub struct SpotShadowMap {
    // Depth texture the spot-light shadow pass renders into (SHADOW_FORMAT,
    // SHADOW_MAP_SIZE x SHADOW_MAP_SIZE).
    pub texture: wgpu::Texture,
    // Default full view over `texture`; used both as the render attachment
    // and as the sampled binding.
    pub view: wgpu::TextureView,
    // Comparison sampler (LessEqual, linear filtering) for depth-compare sampling.
    pub sampler: wgpu::Sampler,
}
|
||||
|
||||
impl SpotShadowMap {
    /// Allocate the depth texture, its default view, and a comparison sampler
    /// for one spot light's shadow map.
    pub fn new(device: &wgpu::Device) -> Self {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Spot Shadow Map Texture"),
            size: wgpu::Extent3d {
                width: SHADOW_MAP_SIZE,
                height: SHADOW_MAP_SIZE,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: SHADOW_FORMAT,
            // Rendered into by the shadow pass, then sampled by the lighting pass.
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });

        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());

        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("Spot Shadow Sampler"),
            // Clamp so lookups outside the light frustum reuse the border texel.
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::MipmapFilterMode::Nearest,
            // Comparison sampler: shadow test passes when sampled depth <= reference.
            compare: Some(wgpu::CompareFunction::LessEqual),
            ..Default::default()
        });

        Self { texture, view, sampler }
    }
}
|
||||
|
||||
/// Compute the view-projection matrix for a spot light shadow pass.
|
||||
///
|
||||
/// - `position`: world-space position of the spot light
|
||||
/// - `direction`: normalized direction the spot light points toward
|
||||
/// - `outer_angle`: outer cone half-angle in **radians**
|
||||
/// - `range`: maximum distance the spot light reaches
|
||||
///
|
||||
/// The projection uses a perspective matrix with FOV = 2 * outer_angle,
|
||||
/// 1:1 aspect ratio, near = 0.1, far = range.
|
||||
pub fn spot_shadow_matrix(position: Vec3, direction: Vec3, outer_angle: f32, range: f32) -> Mat4 {
|
||||
let dir = direction.normalize();
|
||||
|
||||
// Pick a stable up vector that isn't parallel to the light direction.
|
||||
let up = if dir.cross(Vec3::Y).length_squared() < 1e-6 {
|
||||
Vec3::Z
|
||||
} else {
|
||||
Vec3::Y
|
||||
};
|
||||
|
||||
let target = position + dir;
|
||||
let view = Mat4::look_at(position, target, up);
|
||||
|
||||
// FOV = 2 * outer_angle; clamped to avoid degenerate projections.
|
||||
let fov = (2.0 * outer_angle).min(std::f32::consts::PI - 0.01);
|
||||
let near = 0.1_f32;
|
||||
let far = range.max(near + 0.1);
|
||||
let proj = Mat4::perspective(fov, 1.0, near, far);
|
||||
|
||||
proj.mul_mat4(&view)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_math::Vec4;

    /// Loose float comparison for clip-space round-trips.
    fn approx_eq(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-4
    }

    #[test]
    fn test_spot_shadow_matrix_center() {
        let pos = Vec3::new(0.0, 10.0, 0.0);
        let dir = Vec3::new(0.0, -1.0, 0.0);
        let outer_angle = 30.0_f32.to_radians();
        let range = 20.0;

        let vp = spot_shadow_matrix(pos, dir, outer_angle, range);

        // A point directly below the light (along the direction) should map near center of NDC.
        let test_point = Vec4::new(0.0, 5.0, 0.0, 1.0); // 5 units below the light
        let clip = vp.mul_vec4(test_point);
        let ndc_x = clip.x / clip.w;
        let ndc_y = clip.y / clip.w;

        assert!(approx_eq(ndc_x, 0.0), "Center point should have NDC x~0, got {}", ndc_x);
        // y may not be exactly 0 due to the look_at up vector choice, but should be close
        assert!(ndc_y.abs() < 0.5, "Center point should be near NDC center, got y={}", ndc_y);
    }

    #[test]
    fn test_spot_shadow_matrix_depth_range() {
        let pos = Vec3::new(0.0, 0.0, 0.0);
        let dir = Vec3::new(0.0, 0.0, -1.0);
        let outer_angle = 45.0_f32.to_radians();
        let range = 50.0;

        let vp = spot_shadow_matrix(pos, dir, outer_angle, range);

        // A point at near distance should have NDC z ~ 0
        let near_point = Vec4::new(0.0, 0.0, -0.1, 1.0);
        let clip_near = vp.mul_vec4(near_point);
        let ndc_z_near = clip_near.z / clip_near.w;
        assert!(ndc_z_near >= -0.1 && ndc_z_near <= 0.2,
            "Near point NDC z should be ~0, got {}", ndc_z_near);

        // A point at far distance should have NDC z ~ 1
        let far_point = Vec4::new(0.0, 0.0, -50.0, 1.0);
        let clip_far = vp.mul_vec4(far_point);
        let ndc_z_far = clip_far.z / clip_far.w;
        assert!(ndc_z_far > 0.9 && ndc_z_far <= 1.01,
            "Far point NDC z should be ~1, got {}", ndc_z_far);
    }

    #[test]
    fn test_spot_shadow_matrix_not_identity() {
        // Sanity check: an off-axis light must produce a non-trivial transform.
        let pos = Vec3::new(5.0, 5.0, 5.0);
        let dir = Vec3::new(-1.0, -1.0, -1.0).normalize();
        let vp = spot_shadow_matrix(pos, dir, 25.0_f32.to_radians(), 30.0);
        assert_ne!(vp.cols, Mat4::IDENTITY.cols);
    }

    #[test]
    fn test_spot_shadow_matrix_direction_down() {
        // Light pointing straight down should work (uses Z as up instead of Y)
        let pos = Vec3::new(0.0, 10.0, 0.0);
        let dir = Vec3::new(0.0, -1.0, 0.0);
        let vp = spot_shadow_matrix(pos, dir, 30.0_f32.to_radians(), 15.0);
        // Should not panic; the matrix should be valid
        assert_ne!(vp.cols, Mat4::IDENTITY.cols);
    }
}
|
||||
@@ -155,6 +155,15 @@ impl GpuTexture {
|
||||
Self::from_rgba(device, queue, 1, 1, &[255, 255, 255, 255], layout)
|
||||
}
|
||||
|
||||
    /// Create a 1x1 black texture (RGBA 0,0,0,255). Used as default emissive (no emission).
    ///
    /// `layout` is the texture bind group layout the resulting texture's bind
    /// group is created against.
    pub fn black_1x1(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        layout: &wgpu::BindGroupLayout,
    ) -> Self {
        // Single black pixel; alpha 255 keeps the texel fully opaque.
        Self::from_rgba(device, queue, 1, 1, &[0, 0, 0, 255], layout)
    }
|
||||
|
||||
pub fn bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("TextureBindGroupLayout"),
|
||||
@@ -316,6 +325,140 @@ pub fn create_pbr_texture_bind_group(
|
||||
})
|
||||
}
|
||||
|
||||
/// Bind group layout for full PBR textures: albedo (0-1) + normal (2-3) + ORM (4-5) + emissive (6-7).
|
||||
pub fn pbr_full_texture_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("PBR Full Texture Bind Group Layout"),
|
||||
entries: &[
|
||||
// binding 0: albedo texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// binding 1: albedo sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// binding 2: normal map texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// binding 3: normal map sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 3,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// binding 4: ORM texture (AO/Roughness/Metallic)
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 4,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// binding 5: ORM sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 5,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// binding 6: emissive texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 6,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// binding 7: emissive sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 7,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a bind group for full PBR textures (albedo + normal + ORM + emissive).
|
||||
pub fn create_pbr_full_texture_bind_group(
|
||||
device: &wgpu::Device,
|
||||
layout: &wgpu::BindGroupLayout,
|
||||
albedo_view: &wgpu::TextureView,
|
||||
albedo_sampler: &wgpu::Sampler,
|
||||
normal_view: &wgpu::TextureView,
|
||||
normal_sampler: &wgpu::Sampler,
|
||||
orm_view: &wgpu::TextureView,
|
||||
orm_sampler: &wgpu::Sampler,
|
||||
emissive_view: &wgpu::TextureView,
|
||||
emissive_sampler: &wgpu::Sampler,
|
||||
) -> wgpu::BindGroup {
|
||||
device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("PBR Full Texture Bind Group"),
|
||||
layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(albedo_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(albedo_sampler),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::TextureView(normal_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 3,
|
||||
resource: wgpu::BindingResource::Sampler(normal_sampler),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 4,
|
||||
resource: wgpu::BindingResource::TextureView(orm_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 5,
|
||||
resource: wgpu::BindingResource::Sampler(orm_sampler),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 6,
|
||||
resource: wgpu::BindingResource::TextureView(emissive_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 7,
|
||||
resource: wgpu::BindingResource::Sampler(emissive_sampler),
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
187
crates/voltex_script/src/coroutine.rs
Normal file
187
crates/voltex_script/src/coroutine.rs
Normal file
@@ -0,0 +1,187 @@
|
||||
use crate::ffi;
|
||||
use std::ffi::CString;
|
||||
|
||||
/// Status returned by a coroutine resume.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CoroutineStatus {
    /// The coroutine called `coroutine.yield` and can be resumed again.
    Yielded,
    /// The coroutine body returned; further resumes will error.
    Finished,
}
|
||||
|
||||
/// A Lua coroutine backed by a lua_newthread.
pub struct LuaCoroutine {
    /// The coroutine thread state.
    ///
    /// NOTE(review): the thread object is popped from the parent stack right
    /// after creation and nothing visible here re-anchors it (e.g. via the
    /// registry) — confirm the Lua GC cannot collect the thread while this
    /// raw pointer is still in use.
    state: *mut ffi::lua_State,
    /// Whether the coroutine has finished execution (returned or errored).
    finished: bool,
}
|
||||
|
||||
impl crate::state::LuaState {
    /// Create a coroutine from a global Lua function name.
    /// The function must already be defined in the Lua state.
    ///
    /// Returns an error if the thread cannot be created or if `func_name`
    /// does not name a Lua function.
    pub fn create_coroutine(&self, func_name: &str) -> Result<LuaCoroutine, String> {
        unsafe {
            let co_state = ffi::lua_newthread(self.raw());
            if co_state.is_null() {
                return Err("failed to create Lua thread".to_string());
            }
            // lua_newthread pushes the new thread onto the parent stack; pop it
            // to keep the parent stack balanced.
            //
            // NOTE(review): popping removes the only visible reference to the
            // thread. A collectable thread with no reachable reference can be
            // garbage-collected, which would leave `state` dangling — consider
            // anchoring the thread (e.g. luaL_ref on the registry) for the
            // lifetime of LuaCoroutine. TODO confirm against the Lua 5.4 manual.
            ffi::lua_pop(self.raw(), 1);

            // Push the named global onto the coroutine's own stack so the first
            // lua_resume starts executing it.
            let c_name = CString::new(func_name).map_err(|e| e.to_string())?;
            let ty = ffi::lua_getglobal(co_state, c_name.as_ptr());
            if ty != ffi::LUA_TFUNCTION {
                // Pop whatever non-function value lua_getglobal pushed.
                ffi::lua_pop(co_state, 1);
                return Err(format!("'{}' is not a Lua function", func_name));
            }

            Ok(LuaCoroutine {
                state: co_state,
                finished: false,
            })
        }
    }
}
|
||||
|
||||
impl LuaCoroutine {
    /// Resume the coroutine. Returns Yielded if the coroutine yielded,
    /// or Finished if it completed.
    ///
    /// Returns `Err` with the Lua error message if the coroutine raised an
    /// error, or if it has already finished.
    pub fn resume(&mut self) -> Result<CoroutineStatus, String> {
        if self.finished {
            return Err("coroutine already finished".to_string());
        }
        unsafe {
            let mut nresults: std::os::raw::c_int = 0;
            // Resume with zero arguments; no "from" thread is tracked (null).
            let status = ffi::lua_resume(self.state, std::ptr::null_mut(), 0, &mut nresults);
            // Discard any yielded/returned values; this API only reports status.
            if nresults > 0 {
                ffi::lua_pop(self.state, nresults);
            }
            match status {
                ffi::LUA_YIELD => Ok(CoroutineStatus::Yielded),
                ffi::LUA_OK => {
                    // The coroutine body returned normally.
                    self.finished = true;
                    Ok(CoroutineStatus::Finished)
                }
                _ => {
                    // Error status: the message object sits on top of the
                    // coroutine stack. A failed coroutine cannot be resumed again.
                    self.finished = true;
                    let ptr = ffi::lua_tostring(self.state, -1);
                    let msg = if ptr.is_null() {
                        "coroutine error".to_string()
                    } else {
                        std::ffi::CStr::from_ptr(ptr).to_string_lossy().into_owned()
                    };
                    ffi::lua_pop(self.state, 1);
                    Err(msg)
                }
            }
        }
    }

    /// Check if the coroutine has finished execution.
    pub fn is_finished(&self) -> bool {
        self.finished
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::state::LuaState;
    use super::*;

    #[test]
    fn test_simple_coroutine_yield_resume() {
        // Each resume should advance the Lua-side `step` global by one stage.
        let lua = LuaState::new();
        lua.exec("
            function my_coro()
                step = 1
                coroutine.yield()
                step = 2
                coroutine.yield()
                step = 3
            end
            step = 0
        ").unwrap();

        let mut co = lua.create_coroutine("my_coro").unwrap();
        assert!(!co.is_finished());

        let status = co.resume().unwrap();
        assert_eq!(status, CoroutineStatus::Yielded);
        assert_eq!(lua.get_global_number("step"), Some(1.0));

        let status = co.resume().unwrap();
        assert_eq!(status, CoroutineStatus::Yielded);
        assert_eq!(lua.get_global_number("step"), Some(2.0));

        let status = co.resume().unwrap();
        assert_eq!(status, CoroutineStatus::Finished);
        assert_eq!(lua.get_global_number("step"), Some(3.0));
        assert!(co.is_finished());
    }

    #[test]
    fn test_multi_step_coroutine() {
        let lua = LuaState::new();
        lua.exec("
            function counter_coro()
                for i = 1, 5 do
                    count = i
                    coroutine.yield()
                end
            end
            count = 0
        ").unwrap();

        let mut co = lua.create_coroutine("counter_coro").unwrap();
        for i in 1..=5 {
            let status = co.resume().unwrap();
            assert_eq!(lua.get_global_number("count"), Some(i as f64));
            if i < 5 {
                assert_eq!(status, CoroutineStatus::Yielded);
            } else {
                // The yield at i=5 still happens inside the loop body, so the
                // fifth resume reports Yielded too; the function only returns
                // on the following resume.
            }
        }
        // One more resume to finish the coroutine (the loop body yields after setting count)
        let status = co.resume().unwrap();
        assert_eq!(status, CoroutineStatus::Finished);
        assert!(co.is_finished());
    }

    #[test]
    fn test_finished_detection() {
        // A coroutine with no yields finishes on the first resume.
        let lua = LuaState::new();
        lua.exec("
            function instant()
                result = 42
            end
        ").unwrap();

        let mut co = lua.create_coroutine("instant").unwrap();
        assert!(!co.is_finished());
        let status = co.resume().unwrap();
        assert_eq!(status, CoroutineStatus::Finished);
        assert!(co.is_finished());
        assert_eq!(lua.get_global_number("result"), Some(42.0));

        // Resuming again should error
        assert!(co.resume().is_err());
    }

    #[test]
    fn test_create_coroutine_invalid_function() {
        // A name that is not bound to a function must be rejected at creation.
        let lua = LuaState::new();
        let result = lua.create_coroutine("nonexistent");
        assert!(result.is_err());
    }
}
|
||||
@@ -8,8 +8,13 @@ pub type lua_CFunction = unsafe extern "C" fn(*mut lua_State) -> c_int;
|
||||
|
||||
// Constants
|
||||
pub const LUA_OK: c_int = 0;
|
||||
pub const LUA_YIELD: c_int = 1;
|
||||
pub const LUA_TNIL: c_int = 0;
|
||||
pub const LUA_TBOOLEAN: c_int = 1;
|
||||
pub const LUA_TNUMBER: c_int = 3;
|
||||
pub const LUA_TSTRING: c_int = 4;
|
||||
pub const LUA_TTABLE: c_int = 5;
|
||||
pub const LUA_TFUNCTION: c_int = 6;
|
||||
pub const LUA_MULTRET: c_int = -1;
|
||||
|
||||
extern "C" {
|
||||
@@ -26,21 +31,41 @@ extern "C" {
|
||||
pub fn lua_gettop(L: *mut lua_State) -> c_int;
|
||||
pub fn lua_settop(L: *mut lua_State, idx: c_int);
|
||||
pub fn lua_type(L: *mut lua_State, idx: c_int) -> c_int;
|
||||
pub fn lua_pushvalue(L: *mut lua_State, idx: c_int);
|
||||
|
||||
// Push
|
||||
pub fn lua_pushnumber(L: *mut lua_State, n: lua_Number);
|
||||
pub fn lua_pushinteger(L: *mut lua_State, n: lua_Integer);
|
||||
pub fn lua_pushstring(L: *mut lua_State, s: *const c_char) -> *const c_char;
|
||||
pub fn lua_pushcclosure(L: *mut lua_State, f: lua_CFunction, n: c_int);
|
||||
pub fn lua_pushlightuserdata(L: *mut lua_State, p: *mut c_void);
|
||||
pub fn lua_pushboolean(L: *mut lua_State, b: c_int);
|
||||
pub fn lua_pushnil(L: *mut lua_State);
|
||||
|
||||
// Get
|
||||
pub fn lua_tonumberx(L: *mut lua_State, idx: c_int, isnum: *mut c_int) -> lua_Number;
|
||||
pub fn lua_tolstring(L: *mut lua_State, idx: c_int, len: *mut usize) -> *const c_char;
|
||||
pub fn lua_touserdata(L: *mut lua_State, idx: c_int) -> *mut c_void;
|
||||
pub fn lua_toboolean(L: *mut lua_State, idx: c_int) -> c_int;
|
||||
pub fn lua_tointegerx(L: *mut lua_State, idx: c_int, isnum: *mut c_int) -> lua_Integer;
|
||||
|
||||
// Table
|
||||
pub fn lua_createtable(L: *mut lua_State, narr: c_int, nrec: c_int);
|
||||
pub fn lua_setfield(L: *mut lua_State, idx: c_int, k: *const c_char);
|
||||
pub fn lua_getfield(L: *mut lua_State, idx: c_int, k: *const c_char) -> c_int;
|
||||
pub fn lua_next(L: *mut lua_State, idx: c_int) -> c_int;
|
||||
pub fn lua_rawgeti(L: *mut lua_State, idx: c_int, n: lua_Integer) -> c_int;
|
||||
pub fn lua_rawseti(L: *mut lua_State, idx: c_int, n: lua_Integer);
|
||||
|
||||
// Globals
|
||||
pub fn lua_getglobal(L: *mut lua_State, name: *const c_char) -> c_int;
|
||||
pub fn lua_setglobal(L: *mut lua_State, name: *const c_char);
|
||||
|
||||
// Coroutine
|
||||
pub fn lua_newthread(L: *mut lua_State) -> *mut lua_State;
|
||||
pub fn lua_resume(L: *mut lua_State, from: *mut lua_State, nargs: c_int, nresults: *mut c_int) -> c_int;
|
||||
pub fn lua_xmove(from: *mut lua_State, to: *mut lua_State, n: c_int);
|
||||
pub fn lua_status(L: *mut lua_State) -> c_int;
|
||||
}
|
||||
|
||||
// Helper: lua_pcall macro equivalent
|
||||
|
||||
150
crates/voltex_script/src/sandbox.rs
Normal file
150
crates/voltex_script/src/sandbox.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
use crate::state::LuaState;
|
||||
|
||||
/// List of dangerous globals to remove for sandboxing.
|
||||
const BLOCKED_GLOBALS: &[&str] = &[
|
||||
"os",
|
||||
"io",
|
||||
"loadfile",
|
||||
"dofile",
|
||||
"require",
|
||||
"load", // can load arbitrary bytecode
|
||||
"rawget", // bypass metatables
|
||||
"rawset", // bypass metatables
|
||||
"rawequal",
|
||||
"rawlen",
|
||||
"collectgarbage", // can manipulate GC
|
||||
"debug", // full debug access
|
||||
];
|
||||
|
||||
/// Apply sandboxing to a LuaState by removing dangerous globals.
|
||||
/// Call this after `LuaState::new()` and before executing any user scripts.
|
||||
///
|
||||
/// Allowed: math, string, table, pairs, ipairs, print, type, tostring, tonumber,
|
||||
/// pcall, xpcall, error, select, unpack, next, coroutine, assert
|
||||
pub fn create_sandbox(state: &LuaState) -> Result<(), String> {
|
||||
let mut code = String::new();
|
||||
for global in BLOCKED_GLOBALS {
|
||||
code.push_str(&format!("{} = nil\n", global));
|
||||
}
|
||||
state.exec(&code)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::state::LuaState;
|
||||
|
||||
#[test]
|
||||
fn test_os_execute_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("os.execute('echo hello')");
|
||||
assert!(result.is_err(), "os.execute should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_io_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("io.open('/etc/passwd', 'r')");
|
||||
assert!(result.is_err(), "io.open should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_loadfile_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("loadfile('something.lua')");
|
||||
assert!(result.is_err(), "loadfile should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dofile_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("dofile('something.lua')");
|
||||
assert!(result.is_err(), "dofile should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_require_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("require('os')");
|
||||
assert!(result.is_err(), "require should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_blocked() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
let result = lua.exec("debug.getinfo(1)");
|
||||
assert!(result.is_err(), "debug should be blocked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_math_allowed() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("result = math.sin(0)").unwrap();
|
||||
assert_eq!(lua.get_global_number("result"), Some(0.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_string_allowed() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("result = string.len('hello')").unwrap();
|
||||
assert_eq!(lua.get_global_number("result"), Some(5.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_table_functions_allowed() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("
|
||||
t = {3, 1, 2}
|
||||
table.sort(t)
|
||||
result = t[1]
|
||||
").unwrap();
|
||||
assert_eq!(lua.get_global_number("result"), Some(1.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pairs_ipairs_allowed() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("
|
||||
sum = 0
|
||||
for _, v in ipairs({1, 2, 3}) do sum = sum + v end
|
||||
").unwrap();
|
||||
assert_eq!(lua.get_global_number("sum"), Some(6.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_type_tostring_tonumber_allowed() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("
|
||||
t = type(42)
|
||||
n = tonumber('10')
|
||||
s = tostring(42)
|
||||
").unwrap();
|
||||
assert_eq!(lua.get_global_string("t"), Some("number".to_string()));
|
||||
assert_eq!(lua.get_global_number("n"), Some(10.0));
|
||||
assert_eq!(lua.get_global_string("s"), Some("42".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_coroutine_allowed_after_sandbox() {
|
||||
let lua = LuaState::new();
|
||||
create_sandbox(&lua).unwrap();
|
||||
lua.exec("
|
||||
function coro() coroutine.yield() end
|
||||
co = coroutine.create(coro)
|
||||
coroutine.resume(co)
|
||||
status = coroutine.status(co)
|
||||
").unwrap();
|
||||
assert_eq!(lua.get_global_string("status"), Some("suspended".to_string()));
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,16 @@
|
||||
use std::ffi::{CStr, CString};
|
||||
use crate::ffi;
|
||||
|
||||
/// Represents a value that can be passed to/from Lua.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum LuaValue {
|
||||
Nil,
|
||||
Bool(bool),
|
||||
Number(f64),
|
||||
String(String),
|
||||
Table(Vec<(String, LuaValue)>),
|
||||
}
|
||||
|
||||
pub struct LuaState {
|
||||
state: *mut ffi::lua_State,
|
||||
}
|
||||
@@ -109,6 +119,186 @@ impl LuaState {
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a LuaValue onto the Lua stack.
|
||||
pub fn push_value(&self, value: &LuaValue) {
|
||||
unsafe {
|
||||
match value {
|
||||
LuaValue::Nil => ffi::lua_pushnil(self.state),
|
||||
LuaValue::Bool(b) => ffi::lua_pushboolean(self.state, if *b { 1 } else { 0 }),
|
||||
LuaValue::Number(n) => ffi::lua_pushnumber(self.state, *n),
|
||||
LuaValue::String(s) => {
|
||||
let cs = CString::new(s.as_str()).unwrap();
|
||||
ffi::lua_pushstring(self.state, cs.as_ptr());
|
||||
}
|
||||
LuaValue::Table(pairs) => {
|
||||
let borrowed: Vec<(&str, LuaValue)> = pairs.iter()
|
||||
.map(|(k, v)| (k.as_str(), v.clone()))
|
||||
.collect();
|
||||
self.push_table(&borrowed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a Rust slice of key-value pairs as a Lua table onto the stack.
|
||||
pub fn push_table(&self, pairs: &[(&str, LuaValue)]) {
|
||||
unsafe {
|
||||
ffi::lua_createtable(self.state, 0, pairs.len() as i32);
|
||||
for (key, val) in pairs {
|
||||
self.push_value(val);
|
||||
let ckey = CString::new(*key).unwrap();
|
||||
ffi::lua_setfield(self.state, -2, ckey.as_ptr());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a Lua table at the given stack index into key-value pairs.
|
||||
pub fn read_table(&self, index: i32) -> Result<Vec<(String, LuaValue)>, String> {
|
||||
unsafe {
|
||||
if ffi::lua_type(self.state, index) != ffi::LUA_TTABLE {
|
||||
return Err("expected table".to_string());
|
||||
}
|
||||
let abs_idx = if index < 0 {
|
||||
ffi::lua_gettop(self.state) + index + 1
|
||||
} else {
|
||||
index
|
||||
};
|
||||
let mut result = Vec::new();
|
||||
ffi::lua_pushnil(self.state); // first key
|
||||
while ffi::lua_next(self.state, abs_idx) != 0 {
|
||||
// key at -2, value at -1
|
||||
let key = self.read_stack_as_string(-2);
|
||||
let val = self.read_stack_value(-1);
|
||||
result.push((key, val));
|
||||
ffi::lua_pop(self.state, 1); // pop value, keep key for next iteration
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a Vec3 as a Lua table {x=, y=, z=}.
|
||||
pub fn push_vec3(&self, v: [f32; 3]) {
|
||||
let pairs: Vec<(&str, LuaValue)> = vec![
|
||||
("x", LuaValue::Number(v[0] as f64)),
|
||||
("y", LuaValue::Number(v[1] as f64)),
|
||||
("z", LuaValue::Number(v[2] as f64)),
|
||||
];
|
||||
self.push_table(&pairs);
|
||||
}
|
||||
|
||||
/// Read a Vec3 table from the stack. Supports {x=, y=, z=} named fields
|
||||
/// or {[1]=, [2]=, [3]=} array-style fields.
|
||||
pub fn read_vec3(&self, index: i32) -> Result<[f32; 3], String> {
|
||||
unsafe {
|
||||
if ffi::lua_type(self.state, index) != ffi::LUA_TTABLE {
|
||||
return Err("expected table for vec3".to_string());
|
||||
}
|
||||
let abs_idx = if index < 0 {
|
||||
ffi::lua_gettop(self.state) + index + 1
|
||||
} else {
|
||||
index
|
||||
};
|
||||
|
||||
// Try named fields first
|
||||
let cx = CString::new("x").unwrap();
|
||||
let cy = CString::new("y").unwrap();
|
||||
let cz = CString::new("z").unwrap();
|
||||
|
||||
let tx = ffi::lua_getfield(self.state, abs_idx, cx.as_ptr());
|
||||
if tx == ffi::LUA_TNUMBER {
|
||||
let mut isnum = 0;
|
||||
let x = ffi::lua_tonumberx(self.state, -1, &mut isnum) as f32;
|
||||
ffi::lua_pop(self.state, 1);
|
||||
|
||||
ffi::lua_getfield(self.state, abs_idx, cy.as_ptr());
|
||||
let y = ffi::lua_tonumberx(self.state, -1, &mut isnum) as f32;
|
||||
ffi::lua_pop(self.state, 1);
|
||||
|
||||
ffi::lua_getfield(self.state, abs_idx, cz.as_ptr());
|
||||
let z = ffi::lua_tonumberx(self.state, -1, &mut isnum) as f32;
|
||||
ffi::lua_pop(self.state, 1);
|
||||
|
||||
return Ok([x, y, z]);
|
||||
}
|
||||
ffi::lua_pop(self.state, 1);
|
||||
|
||||
// Try array-style {v[1], v[2], v[3]}
|
||||
let mut vals = [0.0f32; 3];
|
||||
for i in 0..3 {
|
||||
ffi::lua_rawgeti(self.state, abs_idx, (i + 1) as ffi::lua_Integer);
|
||||
let mut isnum = 0;
|
||||
vals[i] = ffi::lua_tonumberx(self.state, -1, &mut isnum) as f32;
|
||||
ffi::lua_pop(self.state, 1);
|
||||
if isnum == 0 {
|
||||
return Err("vec3 array element is not a number".to_string());
|
||||
}
|
||||
}
|
||||
Ok(vals)
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a value from the Lua stack at the given index.
|
||||
pub fn read_stack_value(&self, index: i32) -> LuaValue {
|
||||
unsafe {
|
||||
match ffi::lua_type(self.state, index) {
|
||||
ffi::LUA_TNIL => LuaValue::Nil,
|
||||
ffi::LUA_TBOOLEAN => {
|
||||
LuaValue::Bool(ffi::lua_toboolean(self.state, index) != 0)
|
||||
}
|
||||
ffi::LUA_TNUMBER => {
|
||||
let mut isnum = 0;
|
||||
let n = ffi::lua_tonumberx(self.state, index, &mut isnum);
|
||||
LuaValue::Number(n)
|
||||
}
|
||||
ffi::LUA_TSTRING => {
|
||||
let ptr = ffi::lua_tostring(self.state, index);
|
||||
if ptr.is_null() {
|
||||
LuaValue::Nil
|
||||
} else {
|
||||
LuaValue::String(CStr::from_ptr(ptr).to_string_lossy().into_owned())
|
||||
}
|
||||
}
|
||||
ffi::LUA_TTABLE => {
|
||||
// Read recursively
|
||||
self.read_table(index).map(LuaValue::Table).unwrap_or(LuaValue::Nil)
|
||||
}
|
||||
_ => LuaValue::Nil,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the stack value at index as a string key (for table iteration).
|
||||
fn read_stack_as_string(&self, index: i32) -> String {
|
||||
unsafe {
|
||||
match ffi::lua_type(self.state, index) {
|
||||
ffi::LUA_TSTRING => {
|
||||
let ptr = ffi::lua_tostring(self.state, index);
|
||||
if ptr.is_null() {
|
||||
String::new()
|
||||
} else {
|
||||
CStr::from_ptr(ptr).to_string_lossy().into_owned()
|
||||
}
|
||||
}
|
||||
ffi::LUA_TNUMBER => {
|
||||
let mut isnum = 0;
|
||||
let n = ffi::lua_tonumberx(self.state, index, &mut isnum);
|
||||
// Format integer keys without decimal
|
||||
if n == (n as i64) as f64 {
|
||||
format!("{}", n as i64)
|
||||
} else {
|
||||
format!("{}", n)
|
||||
}
|
||||
}
|
||||
_ => String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Re-execute a Lua script file on the existing state (hot reload).
|
||||
pub fn reload_file(&mut self, path: &str) -> Result<(), String> {
|
||||
self.exec_file(path)
|
||||
}
|
||||
|
||||
/// Raw Lua state pointer (for advanced use).
|
||||
pub fn raw(&self) -> *mut ffi::lua_State {
|
||||
self.state
|
||||
@@ -194,4 +384,122 @@ mod tests {
|
||||
lua.exec("result = add_ten(5)").unwrap();
|
||||
assert_eq!(lua.get_global_number("result"), Some(15.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_push_read_table_roundtrip() {
|
||||
let lua = LuaState::new();
|
||||
let pairs: Vec<(&str, LuaValue)> = vec![
|
||||
("name", LuaValue::String("test".into())),
|
||||
("value", LuaValue::Number(42.0)),
|
||||
("active", LuaValue::Bool(true)),
|
||||
];
|
||||
lua.push_table(&pairs);
|
||||
unsafe { crate::ffi::lua_setglobal(lua.state, b"tbl\0".as_ptr() as *const _); }
|
||||
lua.exec("result_name = tbl.name; result_val = tbl.value").unwrap();
|
||||
assert_eq!(lua.get_global_string("result_name"), Some("test".to_string()));
|
||||
assert_eq!(lua.get_global_number("result_val"), Some(42.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_table_from_lua() {
|
||||
let lua = LuaState::new();
|
||||
lua.exec("test_table = { greeting = 'hello', count = 7 }").unwrap();
|
||||
unsafe {
|
||||
let name = std::ffi::CString::new("test_table").unwrap();
|
||||
crate::ffi::lua_getglobal(lua.state, name.as_ptr());
|
||||
let table = lua.read_table(-1).unwrap();
|
||||
crate::ffi::lua_pop(lua.state, 1);
|
||||
|
||||
let has_greeting = table.iter().any(|(k, v)| {
|
||||
k == "greeting" && *v == LuaValue::String("hello".into())
|
||||
});
|
||||
let has_count = table.iter().any(|(k, v)| {
|
||||
k == "count" && *v == LuaValue::Number(7.0)
|
||||
});
|
||||
assert!(has_greeting, "table should contain greeting='hello'");
|
||||
assert!(has_count, "table should contain count=7");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nested_table() {
|
||||
let lua = LuaState::new();
|
||||
lua.exec("nested = { inner = { val = 99 } }").unwrap();
|
||||
unsafe {
|
||||
let name = std::ffi::CString::new("nested").unwrap();
|
||||
crate::ffi::lua_getglobal(lua.state, name.as_ptr());
|
||||
let table = lua.read_table(-1).unwrap();
|
||||
crate::ffi::lua_pop(lua.state, 1);
|
||||
|
||||
let inner = table.iter().find(|(k, _)| k == "inner");
|
||||
assert!(inner.is_some());
|
||||
if let Some((_, LuaValue::Table(inner_pairs))) = inner {
|
||||
let has_val = inner_pairs.iter().any(|(k, v)| {
|
||||
k == "val" && *v == LuaValue::Number(99.0)
|
||||
});
|
||||
assert!(has_val);
|
||||
} else {
|
||||
panic!("inner should be a table");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vec3_named_roundtrip() {
|
||||
let lua = LuaState::new();
|
||||
lua.push_vec3([1.0, 2.5, 3.0]);
|
||||
unsafe { crate::ffi::lua_setglobal(lua.state, b"v\0".as_ptr() as *const _); }
|
||||
|
||||
// Read it back
|
||||
unsafe {
|
||||
crate::ffi::lua_getglobal(lua.state, b"v\0".as_ptr() as *const _);
|
||||
let v = lua.read_vec3(-1).unwrap();
|
||||
crate::ffi::lua_pop(lua.state, 1);
|
||||
assert_eq!(v, [1.0, 2.5, 3.0]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vec3_from_lua_named() {
|
||||
let lua = LuaState::new();
|
||||
lua.exec("v = { x = 10, y = 20, z = 30 }").unwrap();
|
||||
unsafe {
|
||||
crate::ffi::lua_getglobal(lua.state, b"v\0".as_ptr() as *const _);
|
||||
let v = lua.read_vec3(-1).unwrap();
|
||||
crate::ffi::lua_pop(lua.state, 1);
|
||||
assert_eq!(v, [10.0, 20.0, 30.0]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vec3_from_lua_array() {
|
||||
let lua = LuaState::new();
|
||||
lua.exec("v = { 4, 5, 6 }").unwrap();
|
||||
unsafe {
|
||||
crate::ffi::lua_getglobal(lua.state, b"v\0".as_ptr() as *const _);
|
||||
let v = lua.read_vec3(-1).unwrap();
|
||||
crate::ffi::lua_pop(lua.state, 1);
|
||||
assert_eq!(v, [4.0, 5.0, 6.0]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hot_reload() {
|
||||
let dir = std::env::temp_dir().join("voltex_script_reload_test");
|
||||
let _ = std::fs::create_dir_all(&dir);
|
||||
let path = dir.join("reload_test.lua");
|
||||
|
||||
// Initial script
|
||||
std::fs::write(&path, "counter = 1").unwrap();
|
||||
let mut lua = LuaState::new();
|
||||
lua.exec_file(path.to_str().unwrap()).unwrap();
|
||||
assert_eq!(lua.get_global_number("counter"), Some(1.0));
|
||||
|
||||
// Modified script
|
||||
std::fs::write(&path, "counter = counter + 10").unwrap();
|
||||
lua.reload_file(path.to_str().unwrap()).unwrap();
|
||||
assert_eq!(lua.get_global_number("counter"), Some(11.0));
|
||||
|
||||
let _ = std::fs::remove_dir_all(&dir);
|
||||
}
|
||||
}
|
||||
|
||||
229
docs/DEFERRED.md
229
docs/DEFERRED.md
@@ -2,154 +2,159 @@
|
||||
|
||||
## Phase 2
|
||||
|
||||
- **PNG 디코더 자체 구현** — deflate + 필터링. 현재 BMP만 지원.
|
||||
- **JPG 디코더 자체 구현** — Huffman + DCT. 현재 미구현.
|
||||
- **glTF 파서** — OBJ만 지원 중.
|
||||
- ~~**PNG 디코더 자체 구현**~~ ✅ 완료.
|
||||
- ~~**JPG 디코더 자체 구현**~~ ✅ Baseline JPEG 완료.
|
||||
- ~~**glTF 파서**~~ ✅ glTF 2.0 / GLB 완료.
|
||||
- **JPG Progressive** — Baseline만 지원. Progressive JPEG 미구현.
|
||||
- **glTF 애니메이션/스킨** — 메시+머티리얼만 지원.
|
||||
|
||||
## Phase 3a
|
||||
|
||||
- **Archetype 기반 스토리지** → SparseSet 사용 중. 대규모 씬에서 성능 이슈 시 전환.
|
||||
- **시스템 스케줄러** — 의존성 기반 실행 순서/병렬 실행 미구현. 시스템은 함수 호출.
|
||||
- **쿼리 필터** — With, Without, Changed 미구현. query/query2만 존재.
|
||||
- **query3+** — query2까지만 있음.
|
||||
- ~~**시스템 스케줄러**~~ ✅ 순서 기반 Scheduler 완료.
|
||||
- ~~**쿼리 필터**~~ ✅ With/Without 완료.
|
||||
- ~~**query3+**~~ ✅ query3, query4 완료.
|
||||
- **Changed 필터** — 컴포넌트 변경 감지 미구현.
|
||||
- **의존성 기반 스케줄러** — 읽기/쓰기 의존성 자동 정렬/병렬 실행 미구현.
|
||||
|
||||
## Phase 3b
|
||||
|
||||
- **JSON 직렬화** → 커스텀 .vscn 텍스트 포맷 사용.
|
||||
- **바이너리 씬 포맷** — 미구현.
|
||||
- **임의 컴포넌트 직렬화** — Transform/Parent/Tag만 지원.
|
||||
- ~~**JSON 직렬화**~~ ✅ serialize_scene_json/deserialize_scene_json 완료.
|
||||
- ~~**바이너리 씬 포맷**~~ ✅ VSCN 바이너리 포맷 완료.
|
||||
- ~~**임의 컴포넌트 직렬화**~~ ✅ ComponentRegistry 등록 기반 완료.
|
||||
|
||||
## Phase 3c
|
||||
|
||||
- **비동기 로딩** — 동기 insert만.
|
||||
- **핫 리로드** — 파일 변경 감지 미구현.
|
||||
- ~~**비동기 로딩**~~ ✅ AssetLoader (워커 스레드) 완료.
|
||||
- ~~**핫 리로드**~~ ✅ FileWatcher (mtime 폴링) + replace_in_place 완료.
|
||||
|
||||
## Phase 4a
|
||||
|
||||
- **Metallic/Roughness/AO 텍스처 맵** → 파라미터 값만 사용. 텍스처 샘플링 미구현.
|
||||
- **Emissive 맵** — 미구현.
|
||||
- ~~**Metallic/Roughness/AO 텍스처 맵**~~ ✅ ORM 텍스처 샘플링 완료.
|
||||
- ~~**Emissive 맵**~~ ✅ Emissive 텍스처 + 셰이더 완료.
|
||||
|
||||
## Phase 4b
|
||||
|
||||
- **CSM (Cascaded Shadow Maps)** → 단일 캐스케이드만. 원거리 그림자 해상도 낮음.
|
||||
- **Point Light Shadow (큐브맵)** — 미구현.
|
||||
- **Spot Light Shadow** — 미구현.
|
||||
- **라이트 컬링** — 타일/클러스터 기반 미구현.
|
||||
- ~~**CSM (Cascaded Shadow Maps)**~~ ✅ 2-캐스케이드 CSM 완료.
|
||||
- ~~**Point Light Shadow (큐브맵)**~~ ✅ 큐브맵 depth + 6면 렌더링 완료.
|
||||
- ~~**Spot Light Shadow**~~ ✅ Perspective shadow map 완료.
|
||||
- ~~**라이트 컬링**~~ ✅ CPU 프러스텀 컬링 완료.
|
||||
|
||||
## Phase 4c
|
||||
|
||||
- **HDR 큐브맵 환경맵** → 프로시저럴 sky 함수로 대체.
|
||||
- **Irradiance/Prefiltered Map 컨볼루션** → 프로시저럴 근사.
|
||||
- **GPU 컴퓨트 BRDF LUT** → CPU 생성 (256x256).
|
||||
- ~~**HDR 큐브맵 환경맵**~~ → 프로시저럴 Hosek-Wilkie sky로 대체 완료.
|
||||
- ~~**Irradiance/Prefiltered Map 컨볼루션**~~ ✅ SH Irradiance (L2, 9계수) 완료.
|
||||
- ~~**GPU 컴퓨트 BRDF LUT**~~ ✅ 컴퓨트 셰이더 Rg16Float 완료.
|
||||
|
||||
## Phase 5-1
|
||||
|
||||
- **Capsule, Convex Hull 콜라이더** — Sphere + Box만 구현. 추후 GJK/EPA와 함께 추가.
|
||||
- **OBB (회전된 박스) 충돌** — 축 정렬 AABB만 지원. OBB는 GJK/EPA로 대체 예정.
|
||||
- **Incremental BVH 업데이트** — 매 프레임 전체 rebuild. 성능 이슈 시 incremental update 적용.
|
||||
- **연속 충돌 감지 (CCD)** — 이산 충돌만. 빠른 물체의 터널링 미처리.
|
||||
- ~~**Capsule, Convex Hull 콜라이더**~~ ✅ Capsule + GJK/EPA 완료. **Convex Hull 미구현.**
|
||||
- ~~**OBB (회전된 박스) 충돌**~~ ✅ GJK/EPA로 대체 완료.
|
||||
- ~~**Incremental BVH 업데이트**~~ ✅ refit() 완료.
|
||||
- ~~**연속 충돌 감지 (CCD)**~~ ✅ swept_sphere_vs_aabb 완료.
|
||||
|
||||
## Phase 5-2
|
||||
|
||||
- **각속도/회전 물리** — angular_velocity 필드만 존재, 적분 미구현. 관성 텐서 필요.
|
||||
- **마찰 (Coulomb)** — 미구현. 물체가 미끄러짐 없이 반발만.
|
||||
- **Sequential Impulse 솔버** — 단일 반복 충돌 응답만. 다중 물체 쌓기 불안정.
|
||||
- **Sleep/Island 시스템** — 정지 물체 최적화 미구현.
|
||||
- ~~**각속도/회전 물리**~~ ✅ 관성 텐서 + angular velocity 적분 완료.
|
||||
- ~~**마찰 (Coulomb)**~~ ✅ 완료.
|
||||
- ~~**Sequential Impulse 솔버**~~ ✅ N회 반복 솔버 완료.
|
||||
- ~~**Sleep/Island 시스템**~~ ✅ velocity threshold + timer 완료.
|
||||
|
||||
## Phase 5-3
|
||||
|
||||
- **Ray vs Plane, Triangle, Mesh** — 콜라이더 기반만 지원. 메시 레벨 레이캐스트 미구현.
|
||||
- **raycast_all (다중 hit)** — 가장 가까운 hit만 반환.
|
||||
- **BVH 조기 종료 최적화** — 모든 리프 검사 후 최소 t 선택. front-to-back 순회 미구현.
|
||||
- ~~**Ray vs Triangle**~~ ✅ Möller–Trumbore 완료.
|
||||
- **Ray vs Plane, Mesh** — 삼각형 단위만 지원. 전체 메시 순회 레이캐스트 미구현.
|
||||
- ~~**raycast_all (다중 hit)**~~ ✅ 거리순 정렬 완료.
|
||||
- ~~**BVH 조기 종료 최적화**~~ ✅ 재귀 트리 순회 + query_ray 완료.
|
||||
|
||||
## Phase 6-1
|
||||
|
||||
- **macOS/Linux 백엔드** — WASAPI(Windows)만 구현.
|
||||
- ~~**OGG/Vorbis 디코더**~~ ✅ OGG 컨테이너 + Vorbis 디코더 완료.
|
||||
- ~~**24-bit/32-bit WAV**~~ ✅ 완료.
|
||||
- **ECS 통합** — AudioSource 컴포넌트 미구현.
|
||||
- **비동기 로딩** — 동기 로딩만.
|
||||
|
||||
## Phase 6-2
|
||||
|
||||
- ~~**도플러 효과**~~ ✅ doppler_shift 완료.
|
||||
- **HRTF** — 미구현.
|
||||
- **Reverb/Echo** — 미구현.
|
||||
- **Occlusion** — 미구현.
|
||||
|
||||
## Phase 6-3
|
||||
|
||||
- **동적 그룹 생성** — 고정 4개만.
|
||||
- **그룹 간 라우팅/버스** — 미구현.
|
||||
- **이펙트 체인** — Reverb, EQ 등 미구현.
|
||||
- **비선형 페이드 커브** — 선형 페이드만.
|
||||
|
||||
## Phase 7-1
|
||||
|
||||
- **투명 오브젝트** — 디퍼드에서 처리 불가. 별도 포워드 패스 필요.
|
||||
- **G-Buffer 압축** — 미적용.
|
||||
- **Light Volumes** — 풀스크린 라이팅만.
|
||||
- **Stencil 최적화** — 미구현.
|
||||
|
||||
## Phase 7-2
|
||||
|
||||
- **Bilateral Blur** — SSGI 노이즈 제거 블러 미구현.
|
||||
- **반해상도 렌더링** — 풀 해상도 SSGI.
|
||||
- **Temporal Accumulation** — 미구현.
|
||||
- **Light Probes** — 미구현.
|
||||
|
||||
## Phase 7-3
|
||||
|
||||
- **RT Reflections** — 미구현.
|
||||
- **RT AO** — 미구현.
|
||||
- **Point/Spot Light RT shadows** — Directional만 구현.
|
||||
- **Soft RT shadows** — 단일 ray만.
|
||||
- **BLAS 업데이트** — 정적 지오메트리만.
|
||||
- **Fallback** — RT 미지원 GPU 폴백 미구현.
|
||||
|
||||
## Phase 7-4
|
||||
|
||||
- **TAA** — 미구현.
|
||||
- **SSR** — 미구현.
|
||||
- **Motion Blur, DOF** — 미구현.
|
||||
- **Auto Exposure** — 고정 exposure만.
|
||||
- **Bilateral Bloom Blur** — 단순 tent filter.
|
||||
|
||||
## Phase 8-1
|
||||
|
||||
- ~~**자동 내비메시 생성**~~ ✅ 복셀화 기반 NavMeshBuilder 완료.
|
||||
- ~~**String Pulling (Funnel)**~~ ✅ SSF 알고리즘 완료.
|
||||
- ~~**동적 장애물 회피**~~ ✅ velocity obstacle 기반 완료.
|
||||
- **ECS 통합** — AI 컴포넌트 미구현.
|
||||
- **내비메시 직렬화** — 미구현.
|
||||
|
||||
## Phase 8-2
|
||||
|
||||
- ~~**상태 동기화 (스냅샷)**~~ ✅ Snapshot + delta 압축 완료.
|
||||
- ~~**보간 / 예측**~~ ✅ InterpolationBuffer 완료.
|
||||
- **지연 보상** — 미구현.
|
||||
- ~~**신뢰성 계층**~~ ✅ ReliableChannel + OrderedChannel 완료.
|
||||
- **암호화 / 인증** — 미구현.
|
||||
|
||||
## Phase 8-3
|
||||
|
||||
- ~~**핫 리로드**~~ ✅ reload_file 완료.
|
||||
- ~~**엔진 API 노출**~~ ✅ 기본 API (spawn, position, entity_count, velocity, rotation, scale, destroy, find_by_tag) 완료.
|
||||
- ~~**Lua 테이블 ↔ Rust 구조체**~~ ✅ push_table/read_table/push_vec3/read_vec3 완료.
|
||||
- ~~**코루틴**~~ ✅ LuaCoroutine (create/resume/is_finished) 완료.
|
||||
- ~~**샌드박싱**~~ ✅ os/io/loadfile/dofile/require 차단 완료.
|
||||
|
||||
## Phase 8-4
|
||||
|
||||
- **씬 뷰포트** — 3D 렌더러 임베드 미구현.
|
||||
- **엔티티 인스펙터** — ECS 컴포넌트 편집 미구현.
|
||||
- **에셋 브라우저** — 파일 시스템 탐색 미구현.
|
||||
- **텍스트 입력** — 키보드 → 문자열 입력 미구현.
|
||||
- **스크롤, 드래그앤드롭** — 미구현.
|
||||
- ~~**텍스트 입력**~~ ✅ text_input 위젯 완료.
|
||||
- ~~**스크롤, 드래그앤드롭**~~ ✅ scroll_panel + drag/drop 완료.
|
||||
- **도킹, 탭, 윈도우 드래그** — 미구현.
|
||||
- **TTF 폰트** — 비트맵 고정폭만. 가변 크기 미지원.
|
||||
|
||||
## Phase 8-3
|
||||
|
||||
- **핫 리로드** — 파일 변경 감지 + Lua state 재로드 미구현.
|
||||
- **엔진 API 노출** — ECS, 물리, 오디오 등 Lua에서 접근 불가.
|
||||
- **Lua 테이블 ↔ Rust 구조체** — 복잡한 데이터 변환 미구현.
|
||||
- **코루틴** — Lua 코루틴 래핑 미구현.
|
||||
- **샌드박싱** — Lua 보안 제한 미구현.
|
||||
|
||||
## Phase 8-2
|
||||
|
||||
- **상태 동기화 (스냅샷)** — 미구현. 서버→클라이언트 월드 상태 전송.
|
||||
- **보간 / 예측** — 미구현. 클라이언트 측 스무딩.
|
||||
- **지연 보상** — 미구현. 서버 측 히트 판정 보정.
|
||||
- **신뢰성 계층** — 미구현. 패킷 재전송, 순서 보장.
|
||||
- **암호화 / 인증** — 미구현.
|
||||
|
||||
## Phase 8-1
|
||||
|
||||
- **자동 내비메시 생성** — Recast 스타일 복셀화 미구현. 수동 정의만.
|
||||
- **String Pulling (Funnel)** — 삼각형 중심점 경로만. 최적 경로 스무딩 미구현.
|
||||
- **동적 장애물 회피** — 정적 내비메시만. 런타임 장애물 미처리.
|
||||
- **ECS 통합** — AI 컴포넌트 미구현. 함수 직접 호출.
|
||||
- **내비메시 직렬화** — 미구현.
|
||||
|
||||
## Phase 7-4
|
||||
|
||||
- **TAA** — Temporal Anti-Aliasing 미구현. Motion vector 필요.
|
||||
- **SSR** — Screen-Space Reflections 미구현.
|
||||
- **Motion Blur, DOF** — 미구현.
|
||||
- **Auto Exposure** — 고정 exposure만. 적응형 노출 미구현.
|
||||
- **Bilateral Bloom Blur** — 단순 box/tent filter. Kawase blur 미적용.
|
||||
|
||||
## Phase 7-3
|
||||
|
||||
- **RT Reflections** — 미구현. BLAS/TLAS 인프라 재사용 가능.
|
||||
- **RT AO** — 미구현.
|
||||
- **Point/Spot Light RT shadows** — Directional만 구현.
|
||||
- **Soft RT shadows** — 단일 ray만. Multi-ray soft shadow 미구현.
|
||||
- **BLAS 업데이트** — 정적 지오메트리만. 동적 메시 변경 시 BLAS 재빌드 필요.
|
||||
- **Fallback** — RT 미지원 GPU에서 자동 PCF 폴백 미구현.
|
||||
|
||||
## Phase 7-2
|
||||
|
||||
- **Bilateral Blur** — SSGI 노이즈 제거 블러 미구현. 4x4 노이즈 타일링만.
|
||||
- **반해상도 렌더링** — 풀 해상도에서 SSGI 실행. 성능 최적화 미적용.
|
||||
- **Temporal Accumulation** — 프레임 간 누적 미구현. 매 프레임 독립 계산.
|
||||
- **Light Probes** — 베이크 기반 GI 미구현.
|
||||
|
||||
## Phase 7-1
|
||||
|
||||
- **투명 오브젝트** — 디퍼드에서 처리 불가. 별도 포워드 패스 필요.
|
||||
- **G-Buffer 압축** — Position을 depth에서 복원, Normal을 octahedral 인코딩 등 미적용.
|
||||
- **Light Volumes** — 풀스크린 라이팅만. 라이트별 sphere/cone 렌더 미구현.
|
||||
- **Stencil 최적화** — 미구현.
|
||||
|
||||
## Phase 6-3
|
||||
|
||||
- **동적 그룹 생성** — 고정 4개(Master/Bgm/Sfx/Voice)만. 런타임 추가 불가.
|
||||
- **그룹 간 라우팅/버스** — 미구현. 단순 Master → 개별 그룹 구조만.
|
||||
- **이펙트 체인** — Reverb, EQ 등 미구현.
|
||||
- **비선형 페이드 커브** — 선형 페이드만.
|
||||
|
||||
## Phase 6-2
|
||||
|
||||
- **도플러 효과** — 미구현. 상대 속도 기반 주파수 변조.
|
||||
- **HRTF** — 미구현. 헤드폰용 3D 정위.
|
||||
- **Reverb/Echo** — 미구현. 환경 반사음.
|
||||
- **Occlusion** — 미구현. 벽 뒤 소리 차단.
|
||||
|
||||
## Phase 6-1
|
||||
|
||||
- **macOS/Linux 백엔드** — WASAPI(Windows)만 구현. CoreAudio, ALSA 미구현.
|
||||
- **OGG/Vorbis 디코더** — WAV PCM 16-bit만 지원.
|
||||
- **24-bit/32-bit WAV** — 16-bit만 파싱.
|
||||
- **ECS 통합** — AudioSource 컴포넌트 미구현. AudioSystem 직접 호출.
|
||||
- **비동기 로딩** — 동기 로딩만.
|
||||
- **TTF 폰트** — 비트맵 고정폭만.
|
||||
|
||||
## 렌더링 한계
|
||||
|
||||
- **per-entity dynamic UBO** — 수천 개 이상은 인스턴싱 필요.
|
||||
- **max_bind_groups=4** — IBL을 shadow group에 합쳐서 해결. 추가 group 필요 시 리소스 합치거나 bindless 활용.
|
||||
- **max_bind_groups=4** — 리소스 합치기로 해결 중.
|
||||
|
||||
221
docs/STATUS.md
221
docs/STATUS.md
@@ -9,192 +9,157 @@
|
||||
- examples/triangle
|
||||
|
||||
### Phase 2: Rendering Basics
|
||||
- voltex_math: Vec2, Vec4, Mat4 (transforms, look_at, perspective, orthographic)
|
||||
- voltex_math: Vec2, Vec4, Mat4 (transforms, look_at, perspective, orthographic, inverse)
|
||||
- voltex_renderer: MeshVertex(+tangent), Mesh, depth buffer, OBJ parser, Camera, FpsController
|
||||
- voltex_renderer: Blinn-Phong shader, BMP texture loader, GpuTexture
|
||||
- voltex_renderer: PNG decoder (deflate + filter reconstruction)
|
||||
- voltex_renderer: JPG decoder (Baseline JPEG: Huffman, IDCT, MCU, YCbCr, subsampling)
|
||||
- voltex_renderer: glTF 2.0 / GLB parser (JSON parser, accessor extraction, PBR material)
|
||||
- examples/model_viewer
|
||||
|
||||
### Phase 3a: ECS
|
||||
- voltex_ecs: Entity(id+generation), SparseSet<T>, World(type-erased storage)
|
||||
- voltex_ecs: query<T>, query2<A,B>, Transform component
|
||||
- voltex_ecs: query<T>, query2<A,B>, query3, query4, Transform component
|
||||
- voltex_ecs: query_with/query_without, query2_with/query2_without (With/Without filters)
|
||||
- voltex_ecs: System trait, Scheduler (ordered execution)
|
||||
- examples/many_cubes (400 entities, dynamic UBO)
|
||||
|
||||
### Phase 3b: Scene Graph
|
||||
### Phase 3b: Scene Graph + Serialization
|
||||
- voltex_ecs: Parent/Children hierarchy, add_child/remove_child/despawn_recursive
|
||||
- voltex_ecs: WorldTransform propagation (top-down)
|
||||
- voltex_ecs: Scene serialization (.vscn text format), Tag component
|
||||
- voltex_ecs: Scene serialization (.vscn text, JSON, binary VSCN format), Tag component
|
||||
- voltex_ecs: ComponentRegistry (등록 기반 임의 컴포넌트 직렬화)
|
||||
- voltex_ecs: Mini JSON writer/parser (자체 구현)
|
||||
- examples/hierarchy_demo (solar system)
|
||||
|
||||
### Phase 3c: Asset Manager
|
||||
- voltex_asset: Handle<T>(generation), AssetStorage<T>(ref counting), Assets(type-erased)
|
||||
- voltex_asset: AssetLoader (워커 스레드 비동기 로딩, LoadState)
|
||||
- voltex_asset: FileWatcher (mtime 폴링 변경 감지), replace_in_place (핫 리로드)
|
||||
- examples/asset_demo
|
||||
|
||||
### Phase 4a: PBR Rendering
|
||||
- voltex_renderer: MaterialUniform (base_color, metallic, roughness, ao)
|
||||
- voltex_renderer: Cook-Torrance BRDF shader (GGX NDF + Smith geometry + Fresnel-Schlick)
|
||||
- voltex_renderer: ORM 텍스처 맵 (AO/Roughness/Metallic) + Emissive 텍스처
|
||||
- voltex_renderer: Full PBR texture layout (8 bindings: albedo+normal+ORM+emissive)
|
||||
- voltex_renderer: Procedural UV sphere generator
|
||||
- voltex_renderer: PBR pipeline (3→4 bind groups)
|
||||
- examples/pbr_demo (7x7 metallic/roughness sphere grid)
|
||||
|
||||
### Phase 4b-1: Multi-Light
|
||||
### Phase 4b: Lighting + Shadows
|
||||
- voltex_renderer: LightData (Directional/Point/Spot), LightsUniform (MAX_LIGHTS=16)
|
||||
- PBR shader: multi-light loop, point attenuation, spot cone falloff
|
||||
- examples/multi_light_demo (orbiting colored point lights)
|
||||
|
||||
### Phase 4b-2: Shadow Mapping
|
||||
- voltex_renderer: ShadowMap (2048x2048 depth), ShadowUniform, ShadowPassUniform
|
||||
- Shadow depth-only shader + pipeline (front-face cull, depth bias)
|
||||
- PBR shader: shadow map sampling + 3x3 PCF
|
||||
- examples/shadow_demo (directional light shadows)
|
||||
- voltex_renderer: ShadowMap (2048x2048, 3x3 PCF)
|
||||
- voltex_renderer: CascadedShadowMap (2 캐스케이드, 프러스텀 분할)
|
||||
- voltex_renderer: PointShadowMap (큐브맵 6면), SpotShadowMap (perspective)
|
||||
- voltex_renderer: Frustum 라이트 컬링 (Gribb-Hartmann, sphere vs 6 planes)
|
||||
- examples/shadow_demo, multi_light_demo
|
||||
|
||||
### Phase 4c: Normal Map + IBL
|
||||
- MeshVertex: tangent[4] added, computed in OBJ parser + sphere generator
|
||||
- voltex_renderer: BRDF LUT (CPU Monte Carlo, 256x256), IblResources
|
||||
- PBR shader: TBN normal mapping, procedural sky IBL, split-sum approximation
|
||||
- Texture bind group: albedo + normal map (pbr_texture_bind_group_layout)
|
||||
- IBL merged into shadow bind group (group 3) due to max_bind_groups=4
|
||||
- voltex_renderer: TBN normal mapping, Hosek-Wilkie procedural sky
|
||||
- voltex_renderer: SH Irradiance (L2, 9계수, CPU 계산, 셰이더 평가)
|
||||
- voltex_renderer: GPU Compute BRDF LUT (Rg16Float, 컴퓨트 셰이더)
|
||||
- voltex_renderer: SkyParams (sun_direction, turbidity)
|
||||
- examples/ibl_demo
|
||||
|
||||
### Phase 5-1: Collision Detection
|
||||
- voltex_math: AABB type (new, from_center_half_extents, intersects, merged, surface_area)
|
||||
- voltex_physics: Collider enum (Sphere, Box), ContactPoint
|
||||
- voltex_physics: BVH broad phase (median split, longest axis)
|
||||
- voltex_physics: Narrow phase — sphere_vs_sphere, sphere_vs_box, box_vs_box (SAT)
|
||||
- voltex_math: AABB, Ray
|
||||
- voltex_physics: Collider (Sphere, Box, Capsule), ContactPoint, GJK/EPA
|
||||
- voltex_physics: BVH (재귀 트리 순회, query_ray, refit incremental)
|
||||
- voltex_physics: detect_collisions(world) ECS integration
|
||||
|
||||
### Phase 5-2: Rigid Body Simulation
|
||||
- voltex_physics: RigidBody (mass, velocity, restitution), PhysicsConfig
|
||||
- voltex_physics: Semi-implicit Euler integration
|
||||
- voltex_physics: Impulse-based collision response + positional correction (Baumgarte)
|
||||
- voltex_physics: physics_step (integrate → detect → resolve)
|
||||
- voltex_physics: RigidBody (mass, velocity, angular_velocity, friction, sleep)
|
||||
- voltex_physics: Semi-implicit Euler (linear + angular, 관성 텐서)
|
||||
- voltex_physics: Sequential Impulse solver (N회 반복, angular impulse)
|
||||
- voltex_physics: Sleep/Island (velocity threshold + timer)
|
||||
- voltex_physics: CCD (swept_sphere_vs_aabb)
|
||||
|
||||
### Phase 5-3: Raycasting
|
||||
- voltex_math: Ray type (origin, direction, at)
|
||||
- voltex_physics: ray_vs_aabb, ray_vs_sphere, ray_vs_box
|
||||
- voltex_physics: raycast(world, ray, max_dist) BVH-accelerated ECS integration
|
||||
- voltex_physics: ray_vs_aabb, ray_vs_sphere, ray_vs_box, ray_vs_capsule, ray_vs_triangle
|
||||
- voltex_physics: raycast (closest hit), raycast_all (다중 hit, 거리순)
|
||||
- voltex_physics: BVH-accelerated query_ray
|
||||
|
||||
### Phase 6-1: Audio System Foundation
|
||||
- voltex_audio: WAV parser (PCM 16-bit, mono/stereo)
|
||||
- voltex_audio: AudioClip (f32 samples), mixing (volume, looping, channel conversion)
|
||||
- voltex_audio: WASAPI backend (Windows, shared mode, COM FFI)
|
||||
- voltex_audio: AudioSystem (channel-based audio thread, play/stop/volume)
|
||||
- examples/audio_demo (sine wave playback)
|
||||
- voltex_audio: WAV parser (PCM 16/24/32-bit, mono/stereo)
|
||||
- voltex_audio: OGG/Vorbis decoder (자체 구현)
|
||||
- voltex_audio: AudioClip, mixing, WASAPI backend, AudioSystem
|
||||
- examples/audio_demo
|
||||
|
||||
### Phase 6-2: 3D Audio
|
||||
- voltex_audio: Listener, SpatialParams
|
||||
- voltex_audio: distance_attenuation (inverse distance), stereo_pan (equal-power)
|
||||
- voltex_audio: mix_sounds spatial integration (per-sound attenuation + panning)
|
||||
- voltex_audio: play_3d, set_listener API
|
||||
- voltex_audio: distance_attenuation, stereo_pan, play_3d, set_listener
|
||||
- voltex_audio: Doppler effect (doppler_shift)
|
||||
|
||||
### Phase 6-3: Mixer
|
||||
- voltex_audio: MixGroup (Master, Bgm, Sfx, Voice), MixerState
|
||||
- voltex_audio: GroupState with linear fade (tick-based)
|
||||
- voltex_audio: effective_volume (group * master)
|
||||
- voltex_audio: set_group_volume, fade_group API
|
||||
- voltex_audio: MixGroup (Master/Bgm/Sfx/Voice), fade, effective_volume
|
||||
|
||||
### Phase 7-1: Deferred Rendering
|
||||
- voltex_renderer: GBuffer (4 MRT: Position/Normal/Albedo/Material + Depth)
|
||||
- voltex_renderer: G-Buffer pass shader (MRT output, TBN normal mapping)
|
||||
- voltex_renderer: Lighting pass shader (fullscreen triangle, Cook-Torrance BRDF, multi-light, shadow, IBL)
|
||||
- voltex_renderer: Deferred pipeline (gbuffer + lighting bind group layouts)
|
||||
- examples/deferred_demo (5x5 sphere grid + 8 orbiting point lights)
|
||||
|
||||
### Phase 7-2: SSGI (Screen-Space Global Illumination)
|
||||
- voltex_renderer: SsgiResources (hemisphere kernel 64 samples, 4x4 noise, output texture)
|
||||
- voltex_renderer: SSGI shader (SSAO + color bleeding in one fullscreen pass)
|
||||
- voltex_renderer: SSGI pipeline + bind group layouts
|
||||
- voltex_renderer: Lighting pass SSGI integration (ambient * ssgi_ao + indirect)
|
||||
- deferred_demo updated with 3-pass rendering (GBuffer → SSGI → Lighting)
|
||||
|
||||
### Phase 7-3: RT Shadows (Hardware Ray Tracing)
|
||||
- voltex_renderer: RtAccel (BLAS/TLAS acceleration structure management)
|
||||
- voltex_renderer: RT Shadow compute shader (ray query, directional light)
|
||||
- voltex_renderer: RT shadow pipeline + bind group layouts
|
||||
- voltex_renderer: Lighting pass RT shadow integration
|
||||
- deferred_demo updated with hardware RT shadows (4-pass: GBuffer → SSGI → RT Shadow → Lighting)
|
||||
|
||||
### Phase 7-4: Post Processing (HDR + Bloom + ACES)
|
||||
- voltex_renderer: HdrTarget (Rgba16Float), Lighting → HDR output
|
||||
- voltex_renderer: BloomResources (5-level mip chain, downsample/upsample)
|
||||
- voltex_renderer: Bloom shader (bright extract + tent filter upsample)
|
||||
- voltex_renderer: Tonemap shader (ACES filmic + bloom merge + gamma)
|
||||
- deferred_demo updated with full post-processing (7 passes)
|
||||
### Phase 7-1~7-4: Advanced Rendering
|
||||
- Deferred rendering (G-Buffer 4 MRT + Lighting pass)
|
||||
- SSGI (SSAO + color bleeding)
|
||||
- RT Shadows (BLAS/TLAS, compute shader ray query)
|
||||
- Post Processing (HDR + Bloom + ACES tonemap)
|
||||
|
||||
### Phase 8-1: AI System
|
||||
- voltex_ai: NavMesh (manual triangle mesh, find_triangle, edge/center queries)
|
||||
- voltex_ai: A* pathfinding on triangle graph (center-point path)
|
||||
- voltex_ai: Steering behaviors (seek, flee, arrive, wander, follow_path)
|
||||
- voltex_ai: NavMesh (manual + auto builder via voxelization)
|
||||
- voltex_ai: A* pathfinding + Funnel algorithm (String Pulling)
|
||||
- voltex_ai: Steering behaviors + Dynamic obstacle avoidance
|
||||
|
||||
### Phase 8-4: Immediate Mode UI (Editor Foundation)
|
||||
- voltex_editor: FontAtlas (8x12 비트맵 ASCII, 코드 생성)
|
||||
- voltex_editor: DrawList (정점/인덱스/커맨드), DrawVertex (Unorm8x4 color)
|
||||
- voltex_editor: UiContext (IMGUI 상태, hot/active, 커서 레이아웃)
|
||||
- voltex_editor: Widgets (text, button, slider, checkbox, panel)
|
||||
- voltex_editor: UiRenderer (wgpu 2D pipeline, alpha blending, orthographic)
|
||||
- examples/editor_demo (IMGUI 위젯 데모)
|
||||
### Phase 8-2: Networking
|
||||
- voltex_net: Packet protocol, UDP socket, NetServer/NetClient
|
||||
- voltex_net: ReliableChannel (sequence, ACK, retransmission, RTT)
|
||||
- voltex_net: OrderedChannel (in-order delivery)
|
||||
- voltex_net: Snapshot serialization + delta compression
|
||||
- voltex_net: InterpolationBuffer (client-side linear interpolation)
|
||||
|
||||
### Phase 8-3: Lua Scripting
|
||||
- voltex_script: Lua 5.4 내장 빌드 (cc crate)
|
||||
- voltex_script: Lua C API FFI 바인딩
|
||||
- voltex_script: LuaState 안전 래퍼 (exec, exec_file, register_fn, globals)
|
||||
- voltex_script: 기본 바인딩 (voltex_print)
|
||||
- voltex_script: Lua 5.4 내장 빌드, FFI 바인딩
|
||||
- voltex_script: LuaState (exec, register_fn, push_table/read_table, push_vec3/read_vec3)
|
||||
- voltex_script: LuaCoroutine (create/resume), Sandbox (os/io 차단)
|
||||
- voltex_script: Hot reload (reload_file), Engine API (spawn, position, velocity, rotation, scale, destroy, find_by_tag)
|
||||
|
||||
### Phase 8-2: Networking Foundation
|
||||
- voltex_net: Packet protocol (Connect, Accept, Disconnect, Ping, Pong, UserData)
|
||||
- voltex_net: Binary serialization/deserialization
|
||||
- voltex_net: Non-blocking UDP socket wrapper
|
||||
- voltex_net: NetServer (client management, broadcast)
|
||||
- voltex_net: NetClient (server connection, handshake)
|
||||
### Phase 8-4: Immediate Mode UI (Editor)
|
||||
- voltex_editor: FontAtlas (8x12 bitmap ASCII)
|
||||
- voltex_editor: DrawList, UiContext, UiRenderer
|
||||
- voltex_editor: Widgets (text, button, slider, checkbox, panel, text_input, scroll_panel)
|
||||
- voltex_editor: Drag and Drop (begin_drag, drop_target)
|
||||
- examples/editor_demo
|
||||
|
||||
## Crate 구조
|
||||
|
||||
```
|
||||
crates/
|
||||
├── voltex_math — Vec2, Vec3, Vec4, Mat4, AABB, Ray
|
||||
├── voltex_math — Vec2, Vec3, Vec4, Mat4(+inverse), AABB, Ray
|
||||
├── voltex_platform — VoltexWindow, InputState, GameTimer
|
||||
├── voltex_renderer — GPU, Mesh, OBJ, Camera, Material, PBR, Shadow, IBL, Sphere
|
||||
├── voltex_ecs — Entity, SparseSet, World, Transform, Hierarchy, Scene, WorldTransform
|
||||
├── voltex_asset — Handle<T>, AssetStorage<T>, Assets
|
||||
├── voltex_physics — Collider, ContactPoint, BvhTree, RigidBody, detect_collisions, physics_step, raycast
|
||||
├── voltex_audio — AudioClip, WAV parser, mixing, WASAPI backend, AudioSystem, MixGroup, spatial
|
||||
├── voltex_ai — NavMesh, A* pathfinding, steering behaviors
|
||||
├── voltex_net — UDP packets, NetServer, NetClient
|
||||
├── voltex_script — Lua 5.4 FFI, LuaState, scripting bindings
|
||||
└── voltex_editor — IMGUI framework, UiRenderer, widgets
|
||||
├── voltex_renderer — GPU, Mesh, OBJ, glTF, JPG, PNG, Camera, PBR, Shadow(CSM/Point/Spot), IBL(SH), Frustum
|
||||
├── voltex_ecs — Entity, SparseSet, World, Transform, Hierarchy, Scene(JSON/Binary), Scheduler
|
||||
├── voltex_asset — Handle<T>, AssetStorage<T>, Assets, AssetLoader, FileWatcher
|
||||
├── voltex_physics — Collider, GJK/EPA, BVH(refit), RigidBody(angular+sleep), CCD, raycast_all
|
||||
├── voltex_audio — WAV(16/24/32), OGG/Vorbis, mixing, WASAPI, Doppler, MixGroup, spatial
|
||||
├── voltex_ai — NavMesh(+builder), A*, Funnel, steering, obstacle avoidance
|
||||
├── voltex_net — UDP, NetServer/Client, Reliable/Ordered, Snapshot(delta), Interpolation
|
||||
├── voltex_script — Lua 5.4, LuaState, table interop, coroutine, sandbox, hot reload
|
||||
└── voltex_editor — IMGUI, text_input, scroll, drag&drop, UiRenderer
|
||||
```
|
||||
|
||||
## 테스트: 255개 전부 통과
|
||||
## 테스트: 485개 전부 통과
|
||||
|
||||
- voltex_asset: 14
|
||||
- voltex_audio: 35 (audio_clip 2 + wav 5 + mixing 11 + audio_system 2 + spatial 8 + mix_group 7)
|
||||
- voltex_ecs: 39
|
||||
- voltex_math: 37 (29 + AABB 6 + Ray 2)
|
||||
- voltex_physics: 52 (collider 2 + narrow 11 + bvh 5 + collision 7 + rigid_body 3 + integrator 3 + solver 5 + ray 10 + raycast 6)
|
||||
- voltex_math: 37
|
||||
- voltex_platform: 3
|
||||
- voltex_ai: 15 (navmesh 4 + pathfinding 5 + steering 6)
|
||||
- voltex_net: 8 (packet 7 + integration 1)
|
||||
- voltex_script: 9 (state 8 + bindings 1)
|
||||
- voltex_editor: 10 (font 2 + draw_list 3 + widgets 3 + layout 1 + renderer 1)
|
||||
- voltex_renderer: 33 (20 + SSGI 3 + RT 3 + bloom 3 + tonemap 4)
|
||||
- voltex_ecs: 83
|
||||
- voltex_asset: 22
|
||||
- voltex_renderer: 102
|
||||
- voltex_physics: 96
|
||||
- voltex_audio: 42
|
||||
- voltex_ai: 24
|
||||
- voltex_net: 36
|
||||
- voltex_script: 18
|
||||
- voltex_editor: 22
|
||||
|
||||
## Examples (12개)
|
||||
## Examples (12개 + survivor_game)
|
||||
|
||||
- triangle — Phase 1 삼각형
|
||||
- model_viewer — OBJ 큐브 + Blinn-Phong
|
||||
- many_cubes — 400 ECS 엔티티 렌더링
|
||||
- hierarchy_demo — 태양계 씬 그래프
|
||||
- asset_demo — Handle 기반 에셋 관리
|
||||
- pbr_demo — metallic/roughness 구체 그리드
|
||||
- multi_light_demo — 다중 색상 라이트
|
||||
- shadow_demo — Directional Light 그림자
|
||||
- ibl_demo — Normal map + IBL
|
||||
- audio_demo — 사인파 오디오 재생
|
||||
- deferred_demo — 디퍼드 렌더링 + 다중 포인트 라이트
|
||||
- editor_demo — IMGUI 위젯 데모
|
||||
|
||||
## 전체 완료!
|
||||
|
||||
스펙 참조: `docs/superpowers/specs/2026-03-24-voltex-engine-design.md`
|
||||
- triangle, model_viewer, many_cubes, hierarchy_demo, asset_demo
|
||||
- pbr_demo, multi_light_demo, shadow_demo, ibl_demo
|
||||
- audio_demo, deferred_demo, editor_demo
|
||||
- survivor_game (완성 게임)
|
||||
|
||||
## 간소화/미뤄진 항목
|
||||
|
||||
|
||||
959
docs/superpowers/plans/2026-03-25-phase2-gltf-parser.md
Normal file
959
docs/superpowers/plans/2026-03-25-phase2-gltf-parser.md
Normal file
@@ -0,0 +1,959 @@
|
||||
# glTF/GLB Parser Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Self-contained glTF 2.0 / GLB parser that returns mesh data compatible with existing `MeshVertex` and `ObjData` patterns.
|
||||
|
||||
**Architecture:** GLB header parser → mini JSON parser → accessor/bufferView extraction → vertex assembly with existing `compute_tangents`. Single file `gltf.rs` plus `json_parser.rs` for the JSON subset parser.
|
||||
|
||||
**Tech Stack:** Pure Rust, no external dependencies. Reuses `MeshVertex` from `vertex.rs` and `compute_tangents` from `obj.rs`.
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Mini JSON Parser
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_renderer/src/json_parser.rs`
|
||||
- Modify: `crates/voltex_renderer/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests for JSON parsing**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_null() {
|
||||
assert_eq!(parse_json("null").unwrap(), JsonValue::Null);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bool() {
|
||||
assert_eq!(parse_json("true").unwrap(), JsonValue::Bool(true));
|
||||
assert_eq!(parse_json("false").unwrap(), JsonValue::Bool(false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_number() {
|
||||
match parse_json("42").unwrap() {
|
||||
JsonValue::Number(n) => assert!((n - 42.0).abs() < 1e-10),
|
||||
other => panic!("Expected Number, got {:?}", other),
|
||||
}
|
||||
match parse_json("-3.14").unwrap() {
|
||||
JsonValue::Number(n) => assert!((n - (-3.14)).abs() < 1e-10),
|
||||
other => panic!("Expected Number, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_string() {
|
||||
assert_eq!(parse_json("\"hello\"").unwrap(), JsonValue::String("hello".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_string_escapes() {
|
||||
assert_eq!(
|
||||
parse_json(r#""hello\nworld""#).unwrap(),
|
||||
JsonValue::String("hello\nworld".into())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_array() {
|
||||
let val = parse_json("[1, 2, 3]").unwrap();
|
||||
match val {
|
||||
JsonValue::Array(arr) => assert_eq!(arr.len(), 3),
|
||||
other => panic!("Expected Array, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_object() {
|
||||
let val = parse_json(r#"{"name": "test", "value": 42}"#).unwrap();
|
||||
match val {
|
||||
JsonValue::Object(map) => {
|
||||
assert_eq!(map.len(), 2);
|
||||
assert_eq!(map[0].0, "name");
|
||||
}
|
||||
other => panic!("Expected Object, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_nested() {
|
||||
let json = r#"{"meshes": [{"name": "Cube", "primitives": [{"attributes": {"POSITION": 0}}]}]}"#;
|
||||
let val = parse_json(json).unwrap();
|
||||
assert!(matches!(val, JsonValue::Object(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_empty_array() {
|
||||
assert_eq!(parse_json("[]").unwrap(), JsonValue::Array(vec![]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_empty_object() {
|
||||
assert_eq!(parse_json("{}").unwrap(), JsonValue::Object(vec![]));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- json_parser::tests -v`
|
||||
Expected: FAIL — module not found
|
||||
|
||||
- [ ] **Step 3: Implement mini JSON parser**
|
||||
|
||||
```rust
|
||||
// crates/voltex_renderer/src/json_parser.rs
|
||||
|
||||
/// Minimal JSON parser for glTF. No external dependencies.
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum JsonValue {
|
||||
Null,
|
||||
Bool(bool),
|
||||
Number(f64),
|
||||
String(String),
|
||||
Array(Vec<JsonValue>),
|
||||
Object(Vec<(String, JsonValue)>), // preserve order
|
||||
}
|
||||
|
||||
impl JsonValue {
|
||||
pub fn as_object(&self) -> Option<&[(String, JsonValue)]> {
|
||||
match self { JsonValue::Object(v) => Some(v), _ => None }
|
||||
}
|
||||
pub fn as_array(&self) -> Option<&[JsonValue]> {
|
||||
match self { JsonValue::Array(v) => Some(v), _ => None }
|
||||
}
|
||||
pub fn as_str(&self) -> Option<&str> {
|
||||
match self { JsonValue::String(s) => Some(s), _ => None }
|
||||
}
|
||||
pub fn as_f64(&self) -> Option<f64> {
|
||||
match self { JsonValue::Number(n) => Some(*n), _ => None }
|
||||
}
|
||||
pub fn as_u32(&self) -> Option<u32> {
|
||||
self.as_f64().map(|n| n as u32)
|
||||
}
|
||||
pub fn as_bool(&self) -> Option<bool> {
|
||||
match self { JsonValue::Bool(b) => Some(*b), _ => None }
|
||||
}
|
||||
pub fn get(&self, key: &str) -> Option<&JsonValue> {
|
||||
self.as_object()?.iter().find(|(k, _)| k == key).map(|(_, v)| v)
|
||||
}
|
||||
pub fn index(&self, i: usize) -> Option<&JsonValue> {
|
||||
self.as_array()?.get(i)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_json(input: &str) -> Result<JsonValue, String> {
|
||||
let mut parser = JsonParser::new(input);
|
||||
let val = parser.parse_value()?;
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
struct JsonParser<'a> {
|
||||
input: &'a [u8],
|
||||
pos: usize,
|
||||
}
|
||||
|
||||
impl<'a> JsonParser<'a> {
|
||||
fn new(input: &'a str) -> Self {
|
||||
Self { input: input.as_bytes(), pos: 0 }
|
||||
}
|
||||
|
||||
fn skip_whitespace(&mut self) {
|
||||
while self.pos < self.input.len() {
|
||||
match self.input[self.pos] {
|
||||
b' ' | b'\t' | b'\n' | b'\r' => self.pos += 1,
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn peek(&self) -> Option<u8> {
|
||||
self.input.get(self.pos).copied()
|
||||
}
|
||||
|
||||
fn advance(&mut self) -> Result<u8, String> {
|
||||
if self.pos >= self.input.len() {
|
||||
return Err("Unexpected end of JSON".into());
|
||||
}
|
||||
let b = self.input[self.pos];
|
||||
self.pos += 1;
|
||||
Ok(b)
|
||||
}
|
||||
|
||||
fn expect(&mut self, ch: u8) -> Result<(), String> {
|
||||
let b = self.advance()?;
|
||||
if b != ch {
|
||||
return Err(format!("Expected '{}', got '{}'", ch as char, b as char));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_value(&mut self) -> Result<JsonValue, String> {
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b'"') => self.parse_string().map(JsonValue::String),
|
||||
Some(b'{') => self.parse_object(),
|
||||
Some(b'[') => self.parse_array(),
|
||||
Some(b't') => self.parse_literal("true", JsonValue::Bool(true)),
|
||||
Some(b'f') => self.parse_literal("false", JsonValue::Bool(false)),
|
||||
Some(b'n') => self.parse_literal("null", JsonValue::Null),
|
||||
Some(b'-') | Some(b'0'..=b'9') => self.parse_number(),
|
||||
Some(ch) => Err(format!("Unexpected character: '{}'", ch as char)),
|
||||
None => Err("Unexpected end of JSON".into()),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_string(&mut self) -> Result<String, String> {
|
||||
self.expect(b'"')?;
|
||||
let mut s = String::new();
|
||||
loop {
|
||||
let b = self.advance()?;
|
||||
match b {
|
||||
b'"' => return Ok(s),
|
||||
b'\\' => {
|
||||
let esc = self.advance()?;
|
||||
match esc {
|
||||
b'"' => s.push('"'),
|
||||
b'\\' => s.push('\\'),
|
||||
b'/' => s.push('/'),
|
||||
b'b' => s.push('\u{08}'),
|
||||
b'f' => s.push('\u{0C}'),
|
||||
b'n' => s.push('\n'),
|
||||
b'r' => s.push('\r'),
|
||||
b't' => s.push('\t'),
|
||||
b'u' => {
|
||||
let mut hex = String::new();
|
||||
for _ in 0..4 {
|
||||
hex.push(self.advance()? as char);
|
||||
}
|
||||
let code = u32::from_str_radix(&hex, 16)
|
||||
.map_err(|_| format!("Invalid unicode escape: {}", hex))?;
|
||||
if let Some(ch) = char::from_u32(code) {
|
||||
s.push(ch);
|
||||
}
|
||||
}
|
||||
_ => return Err(format!("Invalid escape: \\{}", esc as char)),
|
||||
}
|
||||
}
|
||||
_ => s.push(b as char),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_number(&mut self) -> Result<JsonValue, String> {
|
||||
let start = self.pos;
|
||||
if self.peek() == Some(b'-') { self.pos += 1; }
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
if self.pos < self.input.len() && self.input[self.pos] == b'.' {
|
||||
self.pos += 1;
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
}
|
||||
if self.pos < self.input.len() && (self.input[self.pos] == b'e' || self.input[self.pos] == b'E') {
|
||||
self.pos += 1;
|
||||
if self.pos < self.input.len() && (self.input[self.pos] == b'+' || self.input[self.pos] == b'-') {
|
||||
self.pos += 1;
|
||||
}
|
||||
while self.pos < self.input.len() && self.input[self.pos].is_ascii_digit() {
|
||||
self.pos += 1;
|
||||
}
|
||||
}
|
||||
let s = std::str::from_utf8(&self.input[start..self.pos])
|
||||
.map_err(|_| "Invalid UTF-8 in number")?;
|
||||
let n: f64 = s.parse().map_err(|_| format!("Invalid number: {}", s))?;
|
||||
Ok(JsonValue::Number(n))
|
||||
}
|
||||
|
||||
fn parse_object(&mut self) -> Result<JsonValue, String> {
|
||||
self.expect(b'{')?;
|
||||
self.skip_whitespace();
|
||||
let mut pairs = Vec::new();
|
||||
if self.peek() == Some(b'}') {
|
||||
self.pos += 1;
|
||||
return Ok(JsonValue::Object(pairs));
|
||||
}
|
||||
loop {
|
||||
self.skip_whitespace();
|
||||
let key = self.parse_string()?;
|
||||
self.skip_whitespace();
|
||||
self.expect(b':')?;
|
||||
let val = self.parse_value()?;
|
||||
pairs.push((key, val));
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b',') => { self.pos += 1; }
|
||||
Some(b'}') => { self.pos += 1; return Ok(JsonValue::Object(pairs)); }
|
||||
_ => return Err("Expected ',' or '}' in object".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_array(&mut self) -> Result<JsonValue, String> {
|
||||
self.expect(b'[')?;
|
||||
self.skip_whitespace();
|
||||
let mut items = Vec::new();
|
||||
if self.peek() == Some(b']') {
|
||||
self.pos += 1;
|
||||
return Ok(JsonValue::Array(items));
|
||||
}
|
||||
loop {
|
||||
let val = self.parse_value()?;
|
||||
items.push(val);
|
||||
self.skip_whitespace();
|
||||
match self.peek() {
|
||||
Some(b',') => { self.pos += 1; }
|
||||
Some(b']') => { self.pos += 1; return Ok(JsonValue::Array(items)); }
|
||||
_ => return Err("Expected ',' or ']' in array".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_literal(&mut self, expected: &str, value: JsonValue) -> Result<JsonValue, String> {
|
||||
for &b in expected.as_bytes() {
|
||||
let actual = self.advance()?;
|
||||
if actual != b {
|
||||
return Err(format!("Expected '{}', got '{}'", b as char, actual as char));
|
||||
}
|
||||
}
|
||||
Ok(value)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Register in lib.rs: `pub mod json_parser;`
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- json_parser::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/json_parser.rs crates/voltex_renderer/src/lib.rs
|
||||
git commit -m "feat(renderer): add self-contained JSON parser for glTF support"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: GLB Header + Base64 Decoder
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_renderer/src/gltf.rs`
|
||||
- Modify: `crates/voltex_renderer/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests for GLB header parsing and base64**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_glb_header_magic() {
|
||||
// Invalid magic
|
||||
let data = [0u8; 12];
|
||||
assert!(parse_gltf(&data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_glb_header_version() {
|
||||
// Valid magic but wrong version
|
||||
let mut data = Vec::new();
|
||||
data.extend_from_slice(&0x46546C67u32.to_le_bytes()); // magic "glTF"
|
||||
data.extend_from_slice(&1u32.to_le_bytes()); // version 1 (we need 2)
|
||||
data.extend_from_slice(&12u32.to_le_bytes()); // length
|
||||
assert!(parse_gltf(&data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_base64_decode() {
|
||||
let encoded = "SGVsbG8="; // "Hello"
|
||||
let decoded = decode_base64(encoded).unwrap();
|
||||
assert_eq!(decoded, b"Hello");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_base64_decode_no_padding() {
|
||||
let encoded = "SGVsbG8"; // "Hello" without padding
|
||||
let decoded = decode_base64(encoded).unwrap();
|
||||
assert_eq!(decoded, b"Hello");
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
- [ ] **Step 3: Implement GLB parser skeleton and base64 decoder**
|
||||
|
||||
```rust
|
||||
// crates/voltex_renderer/src/gltf.rs
|
||||
|
||||
use crate::json_parser::{self, JsonValue};
|
||||
use crate::vertex::MeshVertex;
|
||||
use crate::obj::compute_tangents;
|
||||
|
||||
pub struct GltfData {
|
||||
pub meshes: Vec<GltfMesh>,
|
||||
}
|
||||
|
||||
pub struct GltfMesh {
|
||||
pub vertices: Vec<MeshVertex>,
|
||||
pub indices: Vec<u32>,
|
||||
pub name: Option<String>,
|
||||
pub material: Option<GltfMaterial>,
|
||||
}
|
||||
|
||||
pub struct GltfMaterial {
|
||||
pub base_color: [f32; 4],
|
||||
pub metallic: f32,
|
||||
pub roughness: f32,
|
||||
}
|
||||
|
||||
const GLB_MAGIC: u32 = 0x46546C67;
|
||||
const GLB_VERSION: u32 = 2;
|
||||
const CHUNK_JSON: u32 = 0x4E4F534A;
|
||||
const CHUNK_BIN: u32 = 0x004E4942;
|
||||
|
||||
pub fn parse_gltf(data: &[u8]) -> Result<GltfData, String> {
|
||||
if data.len() < 4 {
|
||||
return Err("Data too short".into());
|
||||
}
|
||||
|
||||
// Detect format: GLB (binary) or JSON
|
||||
let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
|
||||
if magic == GLB_MAGIC {
|
||||
parse_glb(data)
|
||||
} else if data[0] == b'{' {
|
||||
parse_gltf_json(data)
|
||||
} else {
|
||||
Err("Unknown glTF format: not GLB or JSON".into())
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_glb(data: &[u8]) -> Result<GltfData, String> {
|
||||
if data.len() < 12 {
|
||||
return Err("GLB header too short".into());
|
||||
}
|
||||
let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
|
||||
if version != GLB_VERSION {
|
||||
return Err(format!("Unsupported GLB version: {} (expected 2)", version));
|
||||
}
|
||||
let _total_len = u32::from_le_bytes([data[8], data[9], data[10], data[11]]) as usize;
|
||||
|
||||
// Parse chunks
|
||||
let mut pos = 12;
|
||||
let mut json_str = String::new();
|
||||
let mut bin_data: Vec<u8> = Vec::new();
|
||||
|
||||
while pos + 8 <= data.len() {
|
||||
let chunk_len = u32::from_le_bytes([data[pos], data[pos+1], data[pos+2], data[pos+3]]) as usize;
|
||||
let chunk_type = u32::from_le_bytes([data[pos+4], data[pos+5], data[pos+6], data[pos+7]]);
|
||||
pos += 8;
|
||||
|
||||
if pos + chunk_len > data.len() {
|
||||
return Err("Chunk extends past data".into());
|
||||
}
|
||||
|
||||
match chunk_type {
|
||||
CHUNK_JSON => {
|
||||
json_str = std::str::from_utf8(&data[pos..pos + chunk_len])
|
||||
.map_err(|_| "Invalid UTF-8 in JSON chunk")?
|
||||
.to_string();
|
||||
}
|
||||
CHUNK_BIN => {
|
||||
bin_data = data[pos..pos + chunk_len].to_vec();
|
||||
}
|
||||
_ => {} // skip unknown chunks
|
||||
}
|
||||
pos += chunk_len;
|
||||
// Chunks are 4-byte aligned
|
||||
pos = (pos + 3) & !3;
|
||||
}
|
||||
|
||||
if json_str.is_empty() {
|
||||
return Err("No JSON chunk found in GLB".into());
|
||||
}
|
||||
|
||||
let json = json_parser::parse_json(&json_str)?;
|
||||
let buffers = vec![bin_data]; // GLB has one implicit binary buffer
|
||||
extract_meshes(&json, &buffers)
|
||||
}
|
||||
|
||||
fn parse_gltf_json(data: &[u8]) -> Result<GltfData, String> {
|
||||
let json_str = std::str::from_utf8(data).map_err(|_| "Invalid UTF-8")?;
|
||||
let json = json_parser::parse_json(json_str)?;
|
||||
|
||||
// Resolve buffers (embedded base64 URIs)
|
||||
let mut buffers = Vec::new();
|
||||
if let Some(bufs) = json.get("buffers").and_then(|v| v.as_array()) {
|
||||
for buf in bufs {
|
||||
if let Some(uri) = buf.get("uri").and_then(|v| v.as_str()) {
|
||||
if let Some(b64) = uri.strip_prefix("data:application/octet-stream;base64,") {
|
||||
buffers.push(decode_base64(b64)?);
|
||||
} else if let Some(b64) = uri.strip_prefix("data:application/gltf-buffer;base64,") {
|
||||
buffers.push(decode_base64(b64)?);
|
||||
} else {
|
||||
return Err(format!("External buffer URIs not supported: {}", uri));
|
||||
}
|
||||
} else {
|
||||
buffers.push(Vec::new());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
extract_meshes(&json, &buffers)
|
||||
}
|
||||
|
||||
fn decode_base64(input: &str) -> Result<Vec<u8>, String> {
|
||||
let table = |c: u8| -> Result<u8, String> {
|
||||
match c {
|
||||
b'A'..=b'Z' => Ok(c - b'A'),
|
||||
b'a'..=b'z' => Ok(c - b'a' + 26),
|
||||
b'0'..=b'9' => Ok(c - b'0' + 52),
|
||||
b'+' => Ok(62),
|
||||
b'/' => Ok(63),
|
||||
b'=' => Ok(0), // padding
|
||||
_ => Err(format!("Invalid base64 character: {}", c as char)),
|
||||
}
|
||||
};
|
||||
|
||||
let bytes: Vec<u8> = input.bytes().filter(|&b| b != b'\n' && b != b'\r' && b != b' ').collect();
|
||||
let mut out = Vec::with_capacity(bytes.len() * 3 / 4);
|
||||
|
||||
for chunk in bytes.chunks(4) {
|
||||
let b0 = table(chunk[0])?;
|
||||
let b1 = if chunk.len() > 1 { table(chunk[1])? } else { 0 };
|
||||
let b2 = if chunk.len() > 2 { table(chunk[2])? } else { 0 };
|
||||
let b3 = if chunk.len() > 3 { table(chunk[3])? } else { 0 };
|
||||
|
||||
out.push((b0 << 2) | (b1 >> 4));
|
||||
if chunk.len() > 2 && chunk[2] != b'=' {
|
||||
out.push((b1 << 4) | (b2 >> 2));
|
||||
}
|
||||
if chunk.len() > 3 && chunk[3] != b'=' {
|
||||
out.push((b2 << 6) | b3);
|
||||
}
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
```
|
||||
|
||||
Register in lib.rs:
|
||||
```rust
|
||||
pub mod gltf;
|
||||
pub use gltf::{parse_gltf, GltfData, GltfMesh, GltfMaterial};
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- gltf::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/gltf.rs crates/voltex_renderer/src/lib.rs
|
||||
git commit -m "feat(renderer): add GLB header parser and base64 decoder for glTF"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Accessor/BufferView Data Extraction
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/gltf.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests for accessor reading**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_read_f32_accessor() {
|
||||
// Simulate a buffer with 3 float32 values
|
||||
let buffer: Vec<u8> = [1.0f32, 2.0, 3.0].iter()
|
||||
.flat_map(|f| f.to_le_bytes())
|
||||
.collect();
|
||||
let data = read_floats(&buffer, 0, 3);
|
||||
assert_eq!(data, vec![1.0, 2.0, 3.0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_u16_indices() {
|
||||
let buffer: Vec<u8> = [0u16, 1, 2].iter()
|
||||
.flat_map(|i| i.to_le_bytes())
|
||||
.collect();
|
||||
let indices = read_indices_u16(&buffer, 0, 3);
|
||||
assert_eq!(indices, vec![0u32, 1, 2]);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
- [ ] **Step 3: Implement accessor reading and mesh extraction**
|
||||
|
||||
```rust
|
||||
fn extract_meshes(json: &JsonValue, buffers: &[Vec<u8>]) -> Result<GltfData, String> {
|
||||
let accessors = json.get("accessors").and_then(|v| v.as_array()).unwrap_or(&[]);
|
||||
let buffer_views = json.get("bufferViews").and_then(|v| v.as_array()).unwrap_or(&[]);
|
||||
let materials_json = json.get("materials").and_then(|v| v.as_array());
|
||||
|
||||
let mut meshes = Vec::new();
|
||||
|
||||
let mesh_list = json.get("meshes").and_then(|v| v.as_array())
|
||||
.ok_or("No meshes in glTF")?;
|
||||
|
||||
for mesh_val in mesh_list {
|
||||
let name = mesh_val.get("name").and_then(|v| v.as_str()).map(|s| s.to_string());
|
||||
let primitives = mesh_val.get("primitives").and_then(|v| v.as_array())
|
||||
.ok_or("Mesh has no primitives")?;
|
||||
|
||||
for prim in primitives {
|
||||
let attrs = prim.get("attributes").and_then(|v| v.as_object())
|
||||
.ok_or("Primitive has no attributes")?;
|
||||
|
||||
// Read position data (required)
|
||||
let pos_idx = attrs.iter().find(|(k, _)| k == "POSITION")
|
||||
.and_then(|(_, v)| v.as_u32())
|
||||
.ok_or("Missing POSITION attribute")? as usize;
|
||||
let positions = read_accessor_vec3(accessors, buffer_views, buffers, pos_idx)?;
|
||||
|
||||
// Read normals (optional)
|
||||
let normals = if let Some(idx) = attrs.iter().find(|(k, _)| k == "NORMAL").and_then(|(_, v)| v.as_u32()) {
|
||||
read_accessor_vec3(accessors, buffer_views, buffers, idx as usize)?
|
||||
} else {
|
||||
vec![[0.0, 1.0, 0.0]; positions.len()]
|
||||
};
|
||||
|
||||
// Read UVs (optional)
|
||||
let uvs = if let Some(idx) = attrs.iter().find(|(k, _)| k == "TEXCOORD_0").and_then(|(_, v)| v.as_u32()) {
|
||||
read_accessor_vec2(accessors, buffer_views, buffers, idx as usize)?
|
||||
} else {
|
||||
vec![[0.0, 0.0]; positions.len()]
|
||||
};
|
||||
|
||||
// Read tangents (optional)
|
||||
let tangents = if let Some(idx) = attrs.iter().find(|(k, _)| k == "TANGENT").and_then(|(_, v)| v.as_u32()) {
|
||||
Some(read_accessor_vec4(accessors, buffer_views, buffers, idx as usize)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Read indices
|
||||
let indices = if let Some(idx) = prim.get("indices").and_then(|v| v.as_u32()) {
|
||||
read_accessor_indices(accessors, buffer_views, buffers, idx as usize)?
|
||||
} else {
|
||||
// No indices — generate sequential
|
||||
(0..positions.len() as u32).collect()
|
||||
};
|
||||
|
||||
// Assemble vertices
|
||||
let mut vertices: Vec<MeshVertex> = Vec::with_capacity(positions.len());
|
||||
for i in 0..positions.len() {
|
||||
vertices.push(MeshVertex {
|
||||
position: positions[i],
|
||||
normal: normals[i],
|
||||
uv: uvs[i],
|
||||
tangent: tangents.as_ref().map_or([0.0; 4], |t| t[i]),
|
||||
});
|
||||
}
|
||||
|
||||
// Compute tangents if not provided
|
||||
if tangents.is_none() {
|
||||
compute_tangents(&mut vertices, &indices);
|
||||
}
|
||||
|
||||
// Read material
|
||||
let material = prim.get("material")
|
||||
.and_then(|v| v.as_u32())
|
||||
.and_then(|idx| materials_json?.get(idx as usize))
|
||||
.and_then(|mat| extract_material(mat));
|
||||
|
||||
meshes.push(GltfMesh { vertices, indices, name: name.clone(), material });
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GltfData { meshes })
|
||||
}
|
||||
|
||||
fn get_buffer_data<'a>(
|
||||
accessor: &JsonValue,
|
||||
buffer_views: &[JsonValue],
|
||||
buffers: &'a [Vec<u8>],
|
||||
) -> Result<(&'a [u8], usize), String> {
|
||||
let bv_idx = accessor.get("bufferView").and_then(|v| v.as_u32())
|
||||
.ok_or("Accessor missing bufferView")? as usize;
|
||||
let bv = buffer_views.get(bv_idx).ok_or("BufferView index out of range")?;
|
||||
let buf_idx = bv.get("buffer").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
|
||||
let bv_offset = bv.get("byteOffset").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
|
||||
let acc_offset = accessor.get("byteOffset").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
|
||||
let buffer = buffers.get(buf_idx).ok_or("Buffer index out of range")?;
|
||||
let offset = bv_offset + acc_offset;
|
||||
Ok((buffer, offset))
|
||||
}
|
||||
|
||||
fn read_accessor_vec3(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 3]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 12;
|
||||
if o + 12 > buffer.len() { return Err("Buffer overflow reading vec3".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
let z = f32::from_le_bytes([buffer[o+8], buffer[o+9], buffer[o+10], buffer[o+11]]);
|
||||
result.push([x, y, z]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_vec2(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 2]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 8;
|
||||
if o + 8 > buffer.len() { return Err("Buffer overflow reading vec2".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
result.push([x, y]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_vec4(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[f32; 4]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 16;
|
||||
if o + 16 > buffer.len() { return Err("Buffer overflow reading vec4".into()); }
|
||||
let x = f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]);
|
||||
let y = f32::from_le_bytes([buffer[o+4], buffer[o+5], buffer[o+6], buffer[o+7]]);
|
||||
let z = f32::from_le_bytes([buffer[o+8], buffer[o+9], buffer[o+10], buffer[o+11]]);
|
||||
let w = f32::from_le_bytes([buffer[o+12], buffer[o+13], buffer[o+14], buffer[o+15]]);
|
||||
result.push([x, y, z, w]);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_indices(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<u32>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let comp_type = acc.get("componentType").and_then(|v| v.as_u32()).ok_or("Missing componentType")?;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
|
||||
let mut result = Vec::with_capacity(count);
|
||||
match comp_type {
|
||||
5121 => { // UNSIGNED_BYTE
|
||||
for i in 0..count {
|
||||
result.push(buffer[offset + i] as u32);
|
||||
}
|
||||
}
|
||||
5123 => { // UNSIGNED_SHORT
|
||||
for i in 0..count {
|
||||
let o = offset + i * 2;
|
||||
result.push(u16::from_le_bytes([buffer[o], buffer[o+1]]) as u32);
|
||||
}
|
||||
}
|
||||
5125 => { // UNSIGNED_INT
|
||||
for i in 0..count {
|
||||
let o = offset + i * 4;
|
||||
result.push(u32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]));
|
||||
}
|
||||
}
|
||||
_ => return Err(format!("Unsupported index component type: {}", comp_type)),
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn extract_material(mat: &JsonValue) -> Option<GltfMaterial> {
|
||||
let pbr = mat.get("pbrMetallicRoughness")?;
|
||||
let base_color = if let Some(arr) = pbr.get("baseColorFactor").and_then(|v| v.as_array()) {
|
||||
[
|
||||
arr.get(0).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(3).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
]
|
||||
} else {
|
||||
[1.0, 1.0, 1.0, 1.0]
|
||||
};
|
||||
let metallic = pbr.get("metallicFactor").and_then(|v| v.as_f64()).unwrap_or(1.0) as f32;
|
||||
let roughness = pbr.get("roughnessFactor").and_then(|v| v.as_f64()).unwrap_or(1.0) as f32;
|
||||
Some(GltfMaterial { base_color, metallic, roughness })
|
||||
}
|
||||
|
||||
// Helper functions for tests
|
||||
fn read_floats(buffer: &[u8], offset: usize, count: usize) -> Vec<f32> {
|
||||
(0..count).map(|i| {
|
||||
let o = offset + i * 4;
|
||||
f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]])
|
||||
}).collect()
|
||||
}
|
||||
|
||||
fn read_indices_u16(buffer: &[u8], offset: usize, count: usize) -> Vec<u32> {
|
||||
(0..count).map(|i| {
|
||||
let o = offset + i * 2;
|
||||
u16::from_le_bytes([buffer[o], buffer[o+1]]) as u32
|
||||
}).collect()
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- gltf::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/gltf.rs
|
||||
git commit -m "feat(renderer): add glTF accessor/bufferView extraction and mesh assembly"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: GLB Integration Test with Synthetic Triangle
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/gltf.rs`
|
||||
|
||||
- [ ] **Step 1: Write integration test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_parse_minimal_glb() {
|
||||
let glb = build_minimal_glb_triangle();
|
||||
let data = parse_gltf(&glb).unwrap();
|
||||
assert_eq!(data.meshes.len(), 1);
|
||||
let mesh = &data.meshes[0];
|
||||
assert_eq!(mesh.vertices.len(), 3);
|
||||
assert_eq!(mesh.indices.len(), 3);
|
||||
// Verify positions
|
||||
assert_eq!(mesh.vertices[0].position, [0.0, 0.0, 0.0]);
|
||||
assert_eq!(mesh.vertices[1].position, [1.0, 0.0, 0.0]);
|
||||
assert_eq!(mesh.vertices[2].position, [0.0, 1.0, 0.0]);
|
||||
}
|
||||
|
||||
/// Build a minimal GLB with one triangle.
|
||||
fn build_minimal_glb_triangle() -> Vec<u8> {
|
||||
// Binary buffer: 3 positions (vec3) + 3 indices (u16)
|
||||
let mut bin = Vec::new();
|
||||
// Positions: 3 * vec3 = 36 bytes
|
||||
for &v in &[0.0f32, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0] {
|
||||
bin.extend_from_slice(&v.to_le_bytes());
|
||||
}
|
||||
// Indices: 3 * u16 = 6 bytes + 2 padding = 8 bytes
|
||||
for &i in &[0u16, 1, 2] {
|
||||
bin.extend_from_slice(&i.to_le_bytes());
|
||||
}
|
||||
bin.extend_from_slice(&[0, 0]); // padding to 4-byte alignment
|
||||
|
||||
let json_str = format!(r#"{{
|
||||
"asset": {{"version": "2.0"}},
|
||||
"buffers": [{{"byteLength": {}}}],
|
||||
"bufferViews": [
|
||||
{{"buffer": 0, "byteOffset": 0, "byteLength": 36}},
|
||||
{{"buffer": 0, "byteOffset": 36, "byteLength": 6}}
|
||||
],
|
||||
"accessors": [
|
||||
{{"bufferView": 0, "componentType": 5126, "count": 3, "type": "VEC3",
|
||||
"max": [1.0, 1.0, 0.0], "min": [0.0, 0.0, 0.0]}},
|
||||
{{"bufferView": 1, "componentType": 5123, "count": 3, "type": "SCALAR"}}
|
||||
],
|
||||
"meshes": [{{
|
||||
"name": "Triangle",
|
||||
"primitives": [{{
|
||||
"attributes": {{"POSITION": 0}},
|
||||
"indices": 1
|
||||
}}]
|
||||
}}]
|
||||
}}"#, bin.len());
|
||||
|
||||
let json_bytes = json_str.as_bytes();
|
||||
// Pad JSON to 4-byte alignment
|
||||
let json_padded_len = (json_bytes.len() + 3) & !3;
|
||||
let mut json_padded = json_bytes.to_vec();
|
||||
while json_padded.len() < json_padded_len {
|
||||
json_padded.push(b' ');
|
||||
}
|
||||
|
||||
let total_len = 12 + 8 + json_padded.len() + 8 + bin.len();
|
||||
let mut glb = Vec::with_capacity(total_len);
|
||||
|
||||
// Header
|
||||
glb.extend_from_slice(&0x46546C67u32.to_le_bytes()); // magic
|
||||
glb.extend_from_slice(&2u32.to_le_bytes()); // version
|
||||
glb.extend_from_slice(&(total_len as u32).to_le_bytes());
|
||||
|
||||
// JSON chunk
|
||||
glb.extend_from_slice(&(json_padded.len() as u32).to_le_bytes());
|
||||
glb.extend_from_slice(&0x4E4F534Au32.to_le_bytes()); // "JSON"
|
||||
glb.extend_from_slice(&json_padded);
|
||||
|
||||
// BIN chunk
|
||||
glb.extend_from_slice(&(bin.len() as u32).to_le_bytes());
|
||||
glb.extend_from_slice(&0x004E4942u32.to_le_bytes()); // "BIN\0"
|
||||
glb.extend_from_slice(&bin);
|
||||
|
||||
glb
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run test**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- gltf::tests::test_parse_minimal_glb -v`
|
||||
Expected: PASS
|
||||
|
||||
- [ ] **Step 3: Add material test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_parse_glb_with_material() {
|
||||
// Same triangle but with a material
|
||||
let glb = build_glb_with_material();
|
||||
let data = parse_gltf(&glb).unwrap();
|
||||
let mesh = &data.meshes[0];
|
||||
let mat = mesh.material.as_ref().unwrap();
|
||||
assert!((mat.base_color[0] - 1.0).abs() < 0.01);
|
||||
assert!((mat.metallic - 0.5).abs() < 0.01);
|
||||
assert!((mat.roughness - 0.8).abs() < 0.01);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run all glTF tests**
|
||||
|
||||
Run: `cargo test --package voltex_renderer -- gltf::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Run full workspace build**
|
||||
|
||||
Run: `cargo build --workspace`
|
||||
Expected: BUILD SUCCESS
|
||||
|
||||
- [ ] **Step 6: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/gltf.rs
|
||||
git commit -m "feat(renderer): complete glTF/GLB parser with mesh and material extraction"
|
||||
```
|
||||
1012
docs/superpowers/plans/2026-03-25-phase2-jpg-decoder.md
Normal file
1012
docs/superpowers/plans/2026-03-25-phase2-jpg-decoder.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,441 @@
|
||||
# ECS Query Filters + System Scheduler Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add `With<T>` / `Without<T>` query filters and a simple ordered system scheduler to voltex_ecs.
|
||||
|
||||
**Architecture:** Query filters use existing `SparseSet::contains()` for per-entity filtering. Scheduler stores `Box<dyn System>` and runs them in registration order. `fn(&mut World)` auto-implements `System`.
|
||||
|
||||
**Tech Stack:** Pure Rust, no external dependencies. Extends existing `World` in `world.rs`, new `scheduler.rs`.
|
||||
|
||||
---
|
||||
|
||||
### Task 1: `has_component<T>` Helper on World
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_ecs/src/world.rs`
|
||||
|
||||
- [ ] **Step 1: Write test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_has_component() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Position { x: 1.0, y: 2.0 });
|
||||
assert!(world.has_component::<Position>(e));
|
||||
assert!(!world.has_component::<Velocity>(e));
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run test to verify failure**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- world::tests::test_has_component -v`
|
||||
Expected: FAIL — method `has_component` not found
|
||||
|
||||
- [ ] **Step 3: Implement**
|
||||
|
||||
Add to `impl World` in `crates/voltex_ecs/src/world.rs`:
|
||||
|
||||
```rust
|
||||
pub fn has_component<T: 'static>(&self, entity: Entity) -> bool {
|
||||
self.storage::<T>().map_or(false, |s| s.contains(entity))
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run test**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- world::tests::test_has_component -v`
|
||||
Expected: PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_ecs/src/world.rs
|
||||
git commit -m "feat(ecs): add has_component helper to World"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: `query_with` and `query_without` (Single Component)
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_ecs/src/world.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_query_with() {
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
let e1 = world.spawn();
|
||||
let e2 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
// e1 has Position but no Velocity
|
||||
world.add(e2, Position { x: 3.0, y: 0.0 });
|
||||
world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
|
||||
|
||||
let results = world.query_with::<Position, Velocity>();
|
||||
assert_eq!(results.len(), 2);
|
||||
let entities: Vec<Entity> = results.iter().map(|(e, _)| *e).collect();
|
||||
assert!(entities.contains(&e0));
|
||||
assert!(entities.contains(&e2));
|
||||
assert!(!entities.contains(&e1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query_without() {
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
let e1 = world.spawn();
|
||||
let e2 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
// e1 has Position but no Velocity — should be included
|
||||
world.add(e2, Position { x: 3.0, y: 0.0 });
|
||||
world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
|
||||
|
||||
let results = world.query_without::<Position, Velocity>();
|
||||
assert_eq!(results.len(), 1);
|
||||
assert_eq!(results[0].0, e1);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- world::tests::test_query_with -v`
|
||||
Expected: FAIL
|
||||
|
||||
- [ ] **Step 3: Implement query_with and query_without**
|
||||
|
||||
Add to `impl World`:
|
||||
|
||||
```rust
|
||||
/// Query entities that have component T AND also have component W.
|
||||
pub fn query_with<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
|
||||
let t_storage = match self.storage::<T>() {
|
||||
Some(s) => s,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
let mut result = Vec::new();
|
||||
for (entity, data) in t_storage.iter() {
|
||||
if self.has_component::<W>(entity) {
|
||||
result.push((entity, data));
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Query entities that have component T but NOT component W.
|
||||
pub fn query_without<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
|
||||
let t_storage = match self.storage::<T>() {
|
||||
Some(s) => s,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
let mut result = Vec::new();
|
||||
for (entity, data) in t_storage.iter() {
|
||||
if !self.has_component::<W>(entity) {
|
||||
result.push((entity, data));
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- world::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_ecs/src/world.rs
|
||||
git commit -m "feat(ecs): add query_with and query_without filters"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: `query2_with` and `query2_without`
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_ecs/src/world.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_query2_with() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Health(i32);
|
||||
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e0, Health(100));
|
||||
|
||||
let e1 = world.spawn();
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
|
||||
// e1 has no Health
|
||||
|
||||
let results = world.query2_with::<Position, Velocity, Health>();
|
||||
assert_eq!(results.len(), 1);
|
||||
assert_eq!(results[0].0, e0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_query2_without() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Health(i32);
|
||||
|
||||
let mut world = World::new();
|
||||
let e0 = world.spawn();
|
||||
world.add(e0, Position { x: 1.0, y: 0.0 });
|
||||
world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
|
||||
world.add(e0, Health(100));
|
||||
|
||||
let e1 = world.spawn();
|
||||
world.add(e1, Position { x: 2.0, y: 0.0 });
|
||||
world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
|
||||
// e1 has no Health
|
||||
|
||||
let results = world.query2_without::<Position, Velocity, Health>();
|
||||
assert_eq!(results.len(), 1);
|
||||
assert_eq!(results[0].0, e1);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
- [ ] **Step 3: Implement**
|
||||
|
||||
```rust
|
||||
/// Query entities with components A and B, that also have component W.
|
||||
pub fn query2_with<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
|
||||
self.query2::<A, B>().into_iter()
|
||||
.filter(|(e, _, _)| self.has_component::<W>(*e))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Query entities with components A and B, that do NOT have component W.
|
||||
pub fn query2_without<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
|
||||
self.query2::<A, B>().into_iter()
|
||||
.filter(|(e, _, _)| !self.has_component::<W>(*e))
|
||||
.collect()
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- world::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_ecs/src/world.rs
|
||||
git commit -m "feat(ecs): add query2_with and query2_without filters"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: System Trait + Scheduler
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_ecs/src/scheduler.rs`
|
||||
- Modify: `crates/voltex_ecs/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::World;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Counter(u32);
|
||||
|
||||
#[test]
|
||||
fn test_scheduler_runs_in_order() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Counter(0));
|
||||
|
||||
let mut scheduler = Scheduler::new();
|
||||
scheduler.add(|world: &mut World| {
|
||||
let c = world.get_mut::<Counter>(world.query::<Counter>().next().unwrap().0).unwrap();
|
||||
c.0 += 1; // 0 → 1
|
||||
});
|
||||
scheduler.add(|world: &mut World| {
|
||||
let c = world.get_mut::<Counter>(world.query::<Counter>().next().unwrap().0).unwrap();
|
||||
c.0 *= 10; // 1 → 10
|
||||
});
|
||||
|
||||
scheduler.run_all(&mut world);
|
||||
|
||||
let c = world.get::<Counter>(e).unwrap();
|
||||
assert_eq!(c.0, 10); // proves order: add first, then multiply
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduler_empty() {
|
||||
let mut world = World::new();
|
||||
let mut scheduler = Scheduler::new();
|
||||
scheduler.run_all(&mut world); // should not panic
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduler_multiple_runs() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Counter(0));
|
||||
|
||||
let mut scheduler = Scheduler::new();
|
||||
scheduler.add(|world: &mut World| {
|
||||
let c = world.get_mut::<Counter>(world.query::<Counter>().next().unwrap().0).unwrap();
|
||||
c.0 += 1;
|
||||
});
|
||||
|
||||
scheduler.run_all(&mut world);
|
||||
scheduler.run_all(&mut world);
|
||||
scheduler.run_all(&mut world);
|
||||
|
||||
assert_eq!(world.get::<Counter>(e).unwrap().0, 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduler_add_chaining() {
|
||||
let mut scheduler = Scheduler::new();
|
||||
scheduler
|
||||
.add(|_: &mut World| {})
|
||||
.add(|_: &mut World| {});
|
||||
assert_eq!(scheduler.len(), 2);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify failure**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- scheduler::tests -v`
|
||||
Expected: FAIL — module not found
|
||||
|
||||
- [ ] **Step 3: Implement Scheduler**
|
||||
|
||||
```rust
|
||||
// crates/voltex_ecs/src/scheduler.rs
|
||||
|
||||
use crate::World;
|
||||
|
||||
/// A system that can be run on the world.
|
||||
pub trait System {
|
||||
fn run(&mut self, world: &mut World);
|
||||
}
|
||||
|
||||
/// Blanket impl: any FnMut(&mut World) is a System.
|
||||
impl<F: FnMut(&mut World)> System for F {
|
||||
fn run(&mut self, world: &mut World) {
|
||||
(self)(world);
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs registered systems in order.
|
||||
pub struct Scheduler {
|
||||
systems: Vec<Box<dyn System>>,
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub fn new() -> Self {
|
||||
Self { systems: Vec::new() }
|
||||
}
|
||||
|
||||
/// Add a system. Systems run in the order they are added.
|
||||
pub fn add<S: System + 'static>(&mut self, system: S) -> &mut Self {
|
||||
self.systems.push(Box::new(system));
|
||||
self
|
||||
}
|
||||
|
||||
/// Run all systems in registration order.
|
||||
pub fn run_all(&mut self, world: &mut World) {
|
||||
for system in &mut self.systems {
|
||||
system.run(world);
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of registered systems.
|
||||
pub fn len(&self) -> usize {
|
||||
self.systems.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.systems.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Scheduler {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Register in `crates/voltex_ecs/src/lib.rs`:
|
||||
```rust
|
||||
pub mod scheduler;
|
||||
pub use scheduler::{Scheduler, System};
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -- scheduler::tests -v`
|
||||
Expected: All PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_ecs/src/scheduler.rs crates/voltex_ecs/src/lib.rs
|
||||
git commit -m "feat(ecs): add System trait and ordered Scheduler"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: Export and Full Build Verification
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_ecs/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Verify lib.rs exports are complete**
|
||||
|
||||
Ensure `lib.rs` exports:
|
||||
```rust
|
||||
pub use world::World; // existing — now includes query_with, query_without, etc.
|
||||
pub use scheduler::{Scheduler, System};
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run all ECS tests**
|
||||
|
||||
Run: `cargo test --package voltex_ecs -v`
|
||||
Expected: All tests PASS (existing + new)
|
||||
|
||||
- [ ] **Step 3: Run full workspace build**
|
||||
|
||||
Run: `cargo build --workspace`
|
||||
Expected: BUILD SUCCESS
|
||||
|
||||
- [ ] **Step 4: Run full workspace tests**
|
||||
|
||||
Run: `cargo test --workspace`
|
||||
Expected: All tests PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_ecs/src/lib.rs
|
||||
git commit -m "feat(ecs): complete query filters and scheduler with exports"
|
||||
```
|
||||
**New file:** `docs/superpowers/plans/2026-03-25-phase3b-scene-serialization.md` (406 lines)
|
||||
# Scene Serialization Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add JSON scene serialization, binary scene format, and registration-based arbitrary component serialization to voltex_ecs.
|
||||
|
||||
**Architecture:** ComponentRegistry stores per-type serialize/deserialize functions. JSON and binary serializers use the registry to handle arbitrary components. Existing .vscn text format unchanged.
|
||||
|
||||
**Tech Stack:** Pure Rust, no external dependencies. Mini JSON writer/parser within voltex_ecs (not reusing voltex_renderer's json_parser to avoid dependency inversion).
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Mini JSON Writer + Parser for voltex_ecs
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_ecs/src/json.rs`
|
||||
- Modify: `crates/voltex_ecs/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests for JSON writer**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_write_null() {
|
||||
assert_eq!(json_write_null(), "null");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_number() {
|
||||
assert_eq!(json_write_f32(3.14), "3.14");
|
||||
assert_eq!(json_write_f32(1.0), "1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_string() {
|
||||
assert_eq!(json_write_string("hello"), "\"hello\"");
|
||||
assert_eq!(json_write_string("a\"b"), "\"a\\\"b\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_array() {
|
||||
assert_eq!(json_write_array(&["1", "2", "3"]), "[1,2,3]");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_object() {
|
||||
let pairs = vec![("name", "\"test\""), ("value", "42")];
|
||||
let result = json_write_object(&pairs);
|
||||
assert_eq!(result, r#"{"name":"test","value":42}"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_number() {
|
||||
match json_parse("42.5").unwrap() {
|
||||
JsonVal::Number(n) => assert!((n - 42.5).abs() < 1e-10),
|
||||
_ => panic!("expected number"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_string() {
|
||||
assert_eq!(json_parse("\"hello\"").unwrap(), JsonVal::Str("hello".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_array() {
|
||||
match json_parse("[1,2,3]").unwrap() {
|
||||
JsonVal::Array(a) => assert_eq!(a.len(), 3),
|
||||
_ => panic!("expected array"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_object() {
|
||||
let val = json_parse(r#"{"x":1,"y":2}"#).unwrap();
|
||||
assert!(matches!(val, JsonVal::Object(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_null() {
|
||||
assert_eq!(json_parse("null").unwrap(), JsonVal::Null);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_nested() {
|
||||
let val = json_parse(r#"{"a":[1,2],"b":{"c":3}}"#).unwrap();
|
||||
assert!(matches!(val, JsonVal::Object(_)));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement JSON writer and parser**
|
||||
|
||||
Implement in `crates/voltex_ecs/src/json.rs`:
|
||||
- `JsonVal` enum: Null, Bool(bool), Number(f64), Str(String), Array(Vec<JsonVal>), Object(Vec<(String, JsonVal)>)
|
||||
- Writer helpers: `json_write_null`, `json_write_f32`, `json_write_string`, `json_write_array`, `json_write_object`
|
||||
- `json_parse(input: &str) -> Result<JsonVal, String>` — minimal recursive descent parser
|
||||
- `JsonVal::get(key)`, `as_f64()`, `as_str()`, `as_array()`, `as_object()` accessors
|
||||
|
||||
Register in lib.rs: `pub mod json;`
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_ecs -- json::tests -v
|
||||
git add crates/voltex_ecs/src/json.rs crates/voltex_ecs/src/lib.rs
|
||||
git commit -m "feat(ecs): add mini JSON writer and parser for scene serialization"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: ComponentRegistry
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_ecs/src/component_registry.rs`
|
||||
- Modify: `crates/voltex_ecs/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{World, Transform};
|
||||
use voltex_math::Vec3;
|
||||
|
||||
#[test]
|
||||
fn test_register_and_serialize() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
assert!(registry.find("transform").is_some());
|
||||
assert!(registry.find("tag").is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_transform() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
|
||||
|
||||
let entry = registry.find("transform").unwrap();
|
||||
let data = (entry.serialize)(&world, e);
|
||||
assert!(data.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roundtrip_transform() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
|
||||
|
||||
let entry = registry.find("transform").unwrap();
|
||||
let data = (entry.serialize)(&world, e).unwrap();
|
||||
|
||||
let mut world2 = World::new();
|
||||
let e2 = world2.spawn();
|
||||
(entry.deserialize)(&mut world2, e2, &data).unwrap();
|
||||
|
||||
let t = world2.get::<Transform>(e2).unwrap();
|
||||
assert!((t.position.x - 1.0).abs() < 1e-6);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement ComponentRegistry**
|
||||
|
||||
```rust
|
||||
// crates/voltex_ecs/src/component_registry.rs
|
||||
use crate::entity::Entity;
|
||||
use crate::world::World;
|
||||
|
||||
pub type SerializeFn = fn(&World, Entity) -> Option<Vec<u8>>;
|
||||
pub type DeserializeFn = fn(&mut World, Entity, &[u8]) -> Result<(), String>;
|
||||
|
||||
pub struct ComponentEntry {
|
||||
pub name: String,
|
||||
pub serialize: SerializeFn,
|
||||
pub deserialize: DeserializeFn,
|
||||
}
|
||||
|
||||
pub struct ComponentRegistry {
|
||||
entries: Vec<ComponentEntry>,
|
||||
}
|
||||
|
||||
impl ComponentRegistry {
|
||||
pub fn new() -> Self { Self { entries: Vec::new() } }
|
||||
|
||||
pub fn register(&mut self, name: &str, ser: SerializeFn, deser: DeserializeFn) {
|
||||
self.entries.push(ComponentEntry {
|
||||
name: name.to_string(), serialize: ser, deserialize: deser,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn find(&self, name: &str) -> Option<&ComponentEntry> {
|
||||
self.entries.iter().find(|e| e.name == name)
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> &[ComponentEntry] { &self.entries }
|
||||
|
||||
pub fn register_defaults(&mut self) {
|
||||
// Transform: serialize as 9 f32s (pos.xyz, rot.xyz, scale.xyz)
|
||||
self.register("transform", serialize_transform, deserialize_transform);
|
||||
// Tag: serialize as UTF-8 string bytes
|
||||
self.register("tag", serialize_tag, deserialize_tag);
|
||||
// Parent handled specially (entity reference)
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_transform(world: &World, entity: Entity) -> Option<Vec<u8>> {
|
||||
let t = world.get::<crate::Transform>(entity)?;
|
||||
let mut data = Vec::with_capacity(36);
|
||||
for &v in &[t.position.x, t.position.y, t.position.z,
|
||||
t.rotation.x, t.rotation.y, t.rotation.z,
|
||||
t.scale.x, t.scale.y, t.scale.z] {
|
||||
data.extend_from_slice(&v.to_le_bytes());
|
||||
}
|
||||
Some(data)
|
||||
}
|
||||
|
||||
fn deserialize_transform(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
|
||||
if data.len() < 36 { return Err("Transform data too short".into()); }
|
||||
let f = |off: usize| f32::from_le_bytes([data[off], data[off+1], data[off+2], data[off+3]]);
|
||||
let t = crate::Transform {
|
||||
position: voltex_math::Vec3::new(f(0), f(4), f(8)),
|
||||
rotation: voltex_math::Vec3::new(f(12), f(16), f(20)),
|
||||
scale: voltex_math::Vec3::new(f(24), f(28), f(32)),
|
||||
};
|
||||
world.add(entity, t);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_tag(world: &World, entity: Entity) -> Option<Vec<u8>> {
|
||||
let tag = world.get::<crate::scene::Tag>(entity)?;
|
||||
Some(tag.0.as_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn deserialize_tag(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
|
||||
let s = std::str::from_utf8(data).map_err(|_| "Invalid UTF-8 in tag")?;
|
||||
world.add(entity, crate::scene::Tag(s.to_string()));
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_ecs -- component_registry -v
|
||||
git add crates/voltex_ecs/src/component_registry.rs crates/voltex_ecs/src/lib.rs
|
||||
git commit -m "feat(ecs): add ComponentRegistry for arbitrary component serialization"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: JSON Scene Serialization
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_ecs/src/scene.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_json_roundtrip() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
|
||||
world.add(e, Tag("player".into()));
|
||||
|
||||
let json = serialize_scene_json(&world, ®istry);
|
||||
assert!(json.contains("\"version\":1"));
|
||||
assert!(json.contains("\"player\""));
|
||||
|
||||
let mut world2 = World::new();
|
||||
let entities = deserialize_scene_json(&mut world2, &json, ®istry).unwrap();
|
||||
assert_eq!(entities.len(), 1);
|
||||
let t = world2.get::<Transform>(entities[0]).unwrap();
|
||||
assert!((t.position.x - 1.0).abs() < 1e-4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_with_parent() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let parent = world.spawn();
|
||||
let child = world.spawn();
|
||||
world.add(parent, Transform::new());
|
||||
world.add(child, Transform::new());
|
||||
add_child(&mut world, parent, child);
|
||||
|
||||
let json = serialize_scene_json(&world, ®istry);
|
||||
let mut world2 = World::new();
|
||||
let entities = deserialize_scene_json(&mut world2, &json, ®istry).unwrap();
|
||||
assert!(world2.get::<Parent>(entities[1]).is_some());
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement serialize_scene_json and deserialize_scene_json**
|
||||
|
||||
Use the json module's writer/parser. Serialize components via registry.
|
||||
Format: `{"version":1,"entities":[{"parent":null_or_idx,"components":{"transform":"base64_data","tag":"base64_data",...}}]}`
|
||||
Use base64 encoding for component binary data in JSON (reuse decoder pattern or simple hex encoding).
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Binary Scene Format
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_ecs/src/binary_scene.rs`
|
||||
- Modify: `crates/voltex_ecs/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_binary_roundtrip() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(5.0, 0.0, -1.0)));
|
||||
world.add(e, Tag("enemy".into()));
|
||||
|
||||
let data = serialize_scene_binary(&world, ®istry);
|
||||
assert_eq!(&data[0..4], b"VSCN");
|
||||
|
||||
let mut world2 = World::new();
|
||||
let entities = deserialize_scene_binary(&mut world2, &data, ®istry).unwrap();
|
||||
assert_eq!(entities.len(), 1);
|
||||
let t = world2.get::<Transform>(entities[0]).unwrap();
|
||||
assert!((t.position.x - 5.0).abs() < 1e-6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_with_hierarchy() {
|
||||
let mut registry = ComponentRegistry::new();
|
||||
registry.register_defaults();
|
||||
let mut world = World::new();
|
||||
let a = world.spawn();
|
||||
let b = world.spawn();
|
||||
world.add(a, Transform::new());
|
||||
world.add(b, Transform::new());
|
||||
add_child(&mut world, a, b);
|
||||
|
||||
let data = serialize_scene_binary(&world, ®istry);
|
||||
let mut world2 = World::new();
|
||||
let entities = deserialize_scene_binary(&mut world2, &data, ®istry).unwrap();
|
||||
assert!(world2.get::<Parent>(entities[1]).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_invalid_magic() {
|
||||
let data = vec![0u8; 20];
|
||||
let mut world = World::new();
|
||||
let registry = ComponentRegistry::new();
|
||||
assert!(deserialize_scene_binary(&mut world, &data, ®istry).is_err());
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement binary format**
|
||||
|
||||
Header: `VSCN` + version(1) u32 LE + entity_count u32 LE
|
||||
Per entity: parent_index i32 LE (-1 = no parent) + component_count u32 LE
|
||||
Per component: name_len u16 LE + name bytes + data_len u32 LE + data bytes
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
---
|
||||
|
||||
### Task 5: Exports and Full Verification
|
||||
|
||||
- [ ] **Step 1: Update lib.rs exports**
|
||||
|
||||
```rust
|
||||
pub mod json;
|
||||
pub mod component_registry;
|
||||
pub mod binary_scene;
|
||||
pub use component_registry::ComponentRegistry;
|
||||
pub use binary_scene::{serialize_scene_binary, deserialize_scene_binary};
|
||||
// scene.rs already exports serialize_scene, deserialize_scene
|
||||
// Add: pub use scene::{serialize_scene_json, deserialize_scene_json};
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run full tests**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_ecs -v
|
||||
cargo build --workspace
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(ecs): complete JSON and binary scene serialization with component registry"
|
||||
```
|
||||
**New file:** `docs/superpowers/plans/2026-03-25-phase3c-async-hotreload.md` (354 lines)
|
||||
# Async Loading + Hot Reload Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add background asset loading via worker thread and file-change-based hot reload to voltex_asset.
|
||||
|
||||
**Architecture:** AssetLoader spawns one worker thread, communicates via channels. FileWatcher polls `std::fs::metadata` for mtime changes. Both are independent modules.
|
||||
|
||||
**Tech Stack:** Pure Rust std library (threads, channels, fs). No external crates.
|
||||
|
||||
---
|
||||
|
||||
### Task 1: FileWatcher (mtime polling)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_asset/src/watcher.rs`
|
||||
- Modify: `crates/voltex_asset/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::io::Write;
|
||||
|
||||
#[test]
|
||||
fn test_watch_and_poll_no_changes() {
|
||||
let mut watcher = FileWatcher::new(Duration::from_millis(0));
|
||||
let dir = std::env::temp_dir().join("voltex_watcher_test_1");
|
||||
let _ = fs::create_dir_all(&dir);
|
||||
let path = dir.join("test.txt");
|
||||
fs::write(&path, "hello").unwrap();
|
||||
|
||||
watcher.watch(path.clone());
|
||||
// First poll — should not report as changed (just registered)
|
||||
let changes = watcher.poll_changes();
|
||||
assert!(changes.is_empty());
|
||||
|
||||
let _ = fs::remove_dir_all(&dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_detect_file_change() {
|
||||
let dir = std::env::temp_dir().join("voltex_watcher_test_2");
|
||||
let _ = fs::create_dir_all(&dir);
|
||||
let path = dir.join("test2.txt");
|
||||
fs::write(&path, "v1").unwrap();
|
||||
|
||||
let mut watcher = FileWatcher::new(Duration::from_millis(0));
|
||||
watcher.watch(path.clone());
|
||||
let _ = watcher.poll_changes(); // register initial mtime
|
||||
|
||||
// Modify file
|
||||
std::thread::sleep(Duration::from_millis(50));
|
||||
fs::write(&path, "v2 with more data").unwrap();
|
||||
|
||||
let changes = watcher.poll_changes();
|
||||
assert!(changes.contains(&path));
|
||||
|
||||
let _ = fs::remove_dir_all(&dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unwatch() {
|
||||
let mut watcher = FileWatcher::new(Duration::from_millis(0));
|
||||
let path = PathBuf::from("/nonexistent/test.txt");
|
||||
watcher.watch(path.clone());
|
||||
watcher.unwatch(&path);
|
||||
assert!(watcher.poll_changes().is_empty());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement FileWatcher**
|
||||
|
||||
```rust
|
||||
// crates/voltex_asset/src/watcher.rs
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
pub struct FileWatcher {
|
||||
watched: HashMap<PathBuf, Option<SystemTime>>,
|
||||
poll_interval: Duration,
|
||||
last_poll: Instant,
|
||||
}
|
||||
|
||||
impl FileWatcher {
|
||||
pub fn new(poll_interval: Duration) -> Self {
|
||||
Self {
|
||||
watched: HashMap::new(),
|
||||
poll_interval,
|
||||
last_poll: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn watch(&mut self, path: PathBuf) {
|
||||
let mtime = std::fs::metadata(&path).ok()
|
||||
.and_then(|m| m.modified().ok());
|
||||
self.watched.insert(path, mtime);
|
||||
}
|
||||
|
||||
pub fn unwatch(&mut self, path: &Path) {
|
||||
self.watched.remove(path);
|
||||
}
|
||||
|
||||
pub fn poll_changes(&mut self) -> Vec<PathBuf> {
|
||||
let now = Instant::now();
|
||||
if now.duration_since(self.last_poll) < self.poll_interval {
|
||||
return Vec::new();
|
||||
}
|
||||
self.last_poll = now;
|
||||
|
||||
let mut changed = Vec::new();
|
||||
for (path, last_mtime) in &mut self.watched {
|
||||
let current = std::fs::metadata(path).ok()
|
||||
.and_then(|m| m.modified().ok());
|
||||
if current != *last_mtime && last_mtime.is_some() {
|
||||
changed.push(path.clone());
|
||||
}
|
||||
*last_mtime = current;
|
||||
}
|
||||
changed
|
||||
}
|
||||
|
||||
pub fn watched_count(&self) -> usize {
|
||||
self.watched.len()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_asset -- watcher::tests -v
|
||||
git add crates/voltex_asset/src/watcher.rs crates/voltex_asset/src/lib.rs
|
||||
git commit -m "feat(asset): add FileWatcher with mtime-based change detection"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: AssetLoader (background thread)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_asset/src/loader.rs`
|
||||
- Modify: `crates/voltex_asset/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests**
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs;
|
||||
|
||||
#[test]
|
||||
fn test_load_state_initial() {
|
||||
let mut loader = AssetLoader::new();
|
||||
let dir = std::env::temp_dir().join("voltex_loader_test_1");
|
||||
let _ = fs::create_dir_all(&dir);
|
||||
let path = dir.join("test.txt");
|
||||
fs::write(&path, "hello world").unwrap();
|
||||
|
||||
let handle: Handle<String> = loader.load(
|
||||
path.clone(),
|
||||
|data| Ok(String::from_utf8_lossy(data).to_string()),
|
||||
);
|
||||
|
||||
// Initially loading
|
||||
assert!(matches!(loader.state::<String>(&handle), LoadState::Loading));
|
||||
|
||||
let _ = fs::remove_dir_all(&dir);
|
||||
loader.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_and_process() {
|
||||
let mut loader = AssetLoader::new();
|
||||
let dir = std::env::temp_dir().join("voltex_loader_test_2");
|
||||
let _ = fs::create_dir_all(&dir);
|
||||
let path = dir.join("data.txt");
|
||||
fs::write(&path, "content123").unwrap();
|
||||
|
||||
let handle: Handle<String> = loader.load(
|
||||
path.clone(),
|
||||
|data| Ok(String::from_utf8_lossy(data).to_string()),
|
||||
);
|
||||
|
||||
// Wait for worker
|
||||
std::thread::sleep(Duration::from_millis(200));
|
||||
|
||||
let mut assets = Assets::new();
|
||||
loader.process_loaded(&mut assets);
|
||||
|
||||
assert!(matches!(loader.state::<String>(&handle), LoadState::Ready));
|
||||
let val = assets.get(handle).unwrap();
|
||||
assert_eq!(val, "content123");
|
||||
|
||||
let _ = fs::remove_dir_all(&dir);
|
||||
loader.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_nonexistent_fails() {
|
||||
let mut loader = AssetLoader::new();
|
||||
let handle: Handle<String> = loader.load(
|
||||
PathBuf::from("/nonexistent/file.txt"),
|
||||
|data| Ok(String::from_utf8_lossy(data).to_string()),
|
||||
);
|
||||
|
||||
std::thread::sleep(Duration::from_millis(200));
|
||||
|
||||
let mut assets = Assets::new();
|
||||
loader.process_loaded(&mut assets);
|
||||
|
||||
assert!(matches!(loader.state::<String>(&handle), LoadState::Failed(_)));
|
||||
loader.shutdown();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement AssetLoader**
|
||||
|
||||
Key design:
|
||||
- Worker thread reads files from disk via channel
|
||||
- `LoadRequest` contains path + parse function (boxed)
|
||||
- `LoadResult` contains handle id + parsed asset (boxed Any) or error
|
||||
- `process_loaded` drains results channel and inserts into Assets
|
||||
- Handle is pre-allocated with a placeholder in a pending map
|
||||
- `state()` checks pending map for Loading/Failed, or assets for Ready
|
||||
|
||||
```rust
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc::{channel, Sender, Receiver};
|
||||
use std::thread::{self, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use crate::handle::Handle;
|
||||
use crate::assets::Assets;
|
||||
|
||||
pub enum LoadState {
|
||||
Loading,
|
||||
Ready,
|
||||
Failed(String),
|
||||
}
|
||||
|
||||
struct LoadRequest {
|
||||
id: u64,
|
||||
path: PathBuf,
|
||||
parse: Box<dyn FnOnce(&[u8]) -> Result<Box<dyn Any + Send>, String> + Send>,
|
||||
}
|
||||
|
||||
struct LoadResult {
|
||||
id: u64,
|
||||
result: Result<Box<dyn Any + Send>, String>,
|
||||
}
|
||||
|
||||
pub struct AssetLoader {
|
||||
sender: Sender<LoadRequest>,
|
||||
receiver: Receiver<LoadResult>,
|
||||
thread: Option<JoinHandle<()>>,
|
||||
next_id: u64,
|
||||
pending: HashMap<u64, PendingEntry>,
|
||||
}
|
||||
|
||||
struct PendingEntry {
|
||||
state: LoadState,
|
||||
handle_id: u32,
|
||||
handle_gen: u32,
|
||||
type_id: std::any::TypeId,
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_asset -- loader::tests -v
|
||||
git add crates/voltex_asset/src/loader.rs crates/voltex_asset/src/lib.rs
|
||||
git commit -m "feat(asset): add AssetLoader with background thread loading"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Storage replace_in_place for Hot Reload
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_asset/src/storage.rs`
|
||||
|
||||
- [ ] **Step 1: Write test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn replace_in_place() {
|
||||
let mut storage: AssetStorage<Mesh> = AssetStorage::new();
|
||||
let h = storage.insert(Mesh { verts: 3 });
|
||||
storage.replace_in_place(h, Mesh { verts: 99 });
|
||||
assert_eq!(storage.get(h).unwrap().verts, 99);
|
||||
// Same handle still works — generation unchanged
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement replace_in_place**
|
||||
|
||||
```rust
|
||||
/// Replace the asset data without changing generation or ref_count.
|
||||
/// Used for hot reload — existing handles remain valid.
|
||||
pub fn replace_in_place(&mut self, handle: Handle<T>, new_asset: T) -> bool {
|
||||
if let Some(Some(entry)) = self.entries.get_mut(handle.id as usize) {
|
||||
if entry.generation == handle.generation {
|
||||
entry.asset = new_asset;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests, commit**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_asset -- storage::tests -v
|
||||
git add crates/voltex_asset/src/storage.rs
|
||||
git commit -m "feat(asset): add replace_in_place for hot reload support"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Exports and Full Verification
|
||||
|
||||
- [ ] **Step 1: Update lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod watcher;
|
||||
pub mod loader;
|
||||
pub use watcher::FileWatcher;
|
||||
pub use loader::{AssetLoader, LoadState};
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run full tests**
|
||||
|
||||
```bash
|
||||
cargo test --package voltex_asset -v
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(asset): complete async loading and hot reload support"
|
||||
```
|
||||
**New file:** `docs/superpowers/plans/2026-03-25-phase4a-pbr-textures.md` (294 lines)
|
||||
# PBR Texture Maps Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add metallic/roughness/AO (ORM) and emissive texture map sampling to PBR shaders, extending bind group 1 from 4 to 8 bindings.
|
||||
|
||||
**Architecture:** Extend `pbr_texture_bind_group_layout` with 4 new bindings (ORM texture+sampler, emissive texture+sampler). Update forward PBR shader and deferred G-Buffer shader. Default 1x1 white/black textures when maps not provided. MaterialUniform values become multipliers for texture values.
|
||||
|
||||
**Tech Stack:** wgpu 28.0, WGSL shaders. No new Rust crates.
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Texture Utility Additions
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/texture.rs`
|
||||
|
||||
- [ ] **Step 1: Write test for black_1x1**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_black_1x1_exists() {
|
||||
// This is a compile/API test — GPU tests need device
|
||||
// We verify the function signature exists and the module compiles
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Add black_1x1 and extended layout functions**
|
||||
|
||||
Add to `texture.rs`:
|
||||
|
||||
```rust
|
||||
/// 1x1 black texture for emissive default (no emission).
|
||||
pub fn black_1x1(
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
layout: &wgpu::BindGroupLayout,
|
||||
) -> GpuTexture {
|
||||
Self::from_rgba(device, queue, 1, 1, &[0, 0, 0, 255], layout)
|
||||
}
|
||||
|
||||
/// Extended PBR texture layout: albedo + normal + ORM + emissive (8 bindings).
|
||||
pub fn pbr_full_texture_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("PBR Full Texture Bind Group Layout"),
|
||||
entries: &[
|
||||
// 0-1: albedo (existing)
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// 2-3: normal map (existing)
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 3,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// 4-5: ORM (AO/Roughness/Metallic) — NEW
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 4,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 5,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// 6-7: Emissive — NEW
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 6,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 7,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
/// Create bind group for full PBR textures (albedo + normal + ORM + emissive).
|
||||
pub fn create_pbr_full_texture_bind_group(
|
||||
device: &wgpu::Device,
|
||||
layout: &wgpu::BindGroupLayout,
|
||||
albedo_view: &wgpu::TextureView,
|
||||
albedo_sampler: &wgpu::Sampler,
|
||||
normal_view: &wgpu::TextureView,
|
||||
normal_sampler: &wgpu::Sampler,
|
||||
orm_view: &wgpu::TextureView,
|
||||
orm_sampler: &wgpu::Sampler,
|
||||
emissive_view: &wgpu::TextureView,
|
||||
emissive_sampler: &wgpu::Sampler,
|
||||
) -> wgpu::BindGroup {
|
||||
// ... 8 entries at bindings 0-7
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Add exports to lib.rs**
|
||||
|
||||
```rust
|
||||
pub use texture::{pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group};
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/texture.rs crates/voltex_renderer/src/lib.rs
|
||||
git commit -m "feat(renderer): add ORM and emissive texture bind group layout"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: Update PBR Forward Shader
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/pbr_shader.wgsl`
|
||||
|
||||
- [ ] **Step 1: Add ORM and emissive bindings**
|
||||
|
||||
Add after existing normal map bindings:
|
||||
|
||||
```wgsl
|
||||
@group(1) @binding(4) var t_orm: texture_2d<f32>;
|
||||
@group(1) @binding(5) var s_orm: sampler;
|
||||
@group(1) @binding(6) var t_emissive: texture_2d<f32>;
|
||||
@group(1) @binding(7) var s_emissive: sampler;
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update fragment shader to sample ORM**
|
||||
|
||||
Replace lines that read material params directly:
|
||||
|
||||
```wgsl
|
||||
// Before:
|
||||
// let metallic = material.metallic;
|
||||
// let roughness = material.roughness;
|
||||
// let ao = material.ao;
|
||||
|
||||
// After:
|
||||
let orm_sample = textureSample(t_orm, s_orm, in.uv);
|
||||
let ao = orm_sample.r * material.ao;
|
||||
let roughness = orm_sample.g * material.roughness;
|
||||
let metallic = orm_sample.b * material.metallic;
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Add emissive to final color**
|
||||
|
||||
```wgsl
|
||||
let emissive = textureSample(t_emissive, s_emissive, in.uv).rgb;
|
||||
|
||||
// Before tone mapping:
|
||||
var color = ambient + Lo + emissive;
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Verify existing examples still compile**
|
||||
|
||||
The existing examples use `pbr_texture_bind_group_layout` (4 bindings). They should continue to work with the OLD layout — the new `pbr_full_texture_bind_group_layout` is a separate function. The shader needs to match whichever layout is used.
|
||||
|
||||
**Strategy:** Create a NEW shader variant `pbr_shader_full.wgsl` with ORM+emissive support, keeping the old shader untouched for backward compatibility. OR add a preprocessor approach.
|
||||
|
||||
**Simpler approach:** Just update `pbr_shader.wgsl` to include the new bindings AND update ALL examples that use it to use the full layout with default white/black textures. This avoids shader duplication.
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/pbr_shader.wgsl
|
||||
git commit -m "feat(renderer): add ORM and emissive texture sampling to PBR shader"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Update Deferred G-Buffer Shader
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/deferred_gbuffer.wgsl`
|
||||
- Modify: `crates/voltex_renderer/src/deferred_lighting.wgsl`
|
||||
|
||||
- [ ] **Step 1: Add ORM/emissive bindings to G-Buffer shader**
|
||||
|
||||
Same pattern as forward shader — add bindings 4-7, sample ORM for metallic/roughness/ao, store in material_data output.
|
||||
|
||||
- [ ] **Step 2: Store emissive in G-Buffer**
|
||||
|
||||
Option: Pack emissive into existing G-Buffer outputs or add a 5th MRT.
|
||||
|
||||
**Recommended:** Store emissive in material_data.a (was padding=1.0). Simple, no extra MRT.
|
||||
|
||||
```wgsl
|
||||
// G-Buffer output location(3): [metallic, roughness, ao, emissive_intensity]
|
||||
// Emissive color stored as luminance for simplicity, or use location(2).a
|
||||
out.material_data = vec4<f32>(metallic, roughness, ao, emissive_luminance);
|
||||
```
|
||||
|
||||
Actually simpler: add emissive directly in the lighting pass as a texture sample pass-through. But that requires the emissive texture in the lighting pass too.
|
||||
|
||||
**Simplest approach:** Write emissive to albedo output additively, since emissive bypasses lighting.
|
||||
|
||||
```wgsl
|
||||
// In deferred_gbuffer.wgsl:
|
||||
let emissive = textureSample(t_emissive, s_emissive, in.uv).rgb;
|
||||
// Store emissive luminance in material_data.w (was 1.0 padding)
|
||||
let emissive_lum = dot(emissive, vec3<f32>(0.299, 0.587, 0.114));
|
||||
out.material_data = vec4<f32>(metallic, roughness, ao, emissive_lum);
|
||||
```
|
||||
|
||||
Then in `deferred_lighting.wgsl`, read `material_data.w` as emissive intensity and add `albedo * emissive_lum` to final color.
|
||||
|
||||
- [ ] **Step 3: Update deferred_lighting.wgsl**
|
||||
|
||||
```wgsl
|
||||
let emissive_lum = textureSample(g_material, s_gbuffer, uv).w;
|
||||
// After lighting calculation:
|
||||
color += albedo * emissive_lum;
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_renderer/src/deferred_gbuffer.wgsl crates/voltex_renderer/src/deferred_lighting.wgsl
|
||||
git commit -m "feat(renderer): add ORM and emissive to deferred rendering pipeline"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Update Examples + Pipeline Creation
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_renderer/src/pbr_pipeline.rs`
|
||||
- Modify: `examples/pbr_demo/src/main.rs`
|
||||
- Modify: `examples/ibl_demo/src/main.rs`
|
||||
- Modify: `crates/voltex_renderer/src/deferred_pipeline.rs`
|
||||
|
||||
- [ ] **Step 1: Update examples to use full texture layout**
|
||||
|
||||
In each example that uses `pbr_texture_bind_group_layout`:
|
||||
- Switch to `pbr_full_texture_bind_group_layout`
|
||||
- Create default ORM texture (white_1x1 = ao=1, roughness=1, metallic=1 — but material multipliers control actual values)
|
||||
- Create default emissive texture (black_1x1 = no emission)
|
||||
- Pass all 4 texture pairs to `create_pbr_full_texture_bind_group`
|
||||
|
||||
- [ ] **Step 2: Update deferred_pipeline.rs**
|
||||
|
||||
Update gbuffer pipeline layout to use the full texture layout.
|
||||
|
||||
- [ ] **Step 3: Build and test**
|
||||
|
||||
```bash
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(renderer): update examples and pipelines for full PBR texture maps"
|
||||
```
|
||||
137
docs/superpowers/specs/2026-03-25-phase2-3a-deferred.md
Normal file
137
docs/superpowers/specs/2026-03-25-phase2-3a-deferred.md
Normal file
@@ -0,0 +1,137 @@
|
||||
# Phase 2-3a Deferred Items Spec
|
||||
|
||||
## A. JPG 디코더 (`voltex_renderer/src/jpg.rs`)
|
||||
|
||||
### 범위
|
||||
- Baseline JPEG (SOF0) only
|
||||
- Progressive, Arithmetic coding 미지원
|
||||
- 외부 의존성 없음 (자체 구현)
|
||||
|
||||
### API
|
||||
```rust
|
||||
pub fn parse_jpg(data: &[u8]) -> Result<(Vec<u8>, u32, u32), String>
|
||||
```
|
||||
- PNG `parse_png`과 동일 패턴: (RGBA pixels, width, height) 반환
|
||||
|
||||
### 구현 요소
|
||||
1. JFIF 마커 파싱: SOI, SOF0, DHT, DQT, SOS, EOI
|
||||
2. Huffman 디코더: DC/AC 테이블 구축 + 비트스트림 디코딩
|
||||
3. 역양자화 (dequantization): DQT 테이블 × DCT 계수
|
||||
4. 8x8 IDCT: 정수 또는 부동소수점
|
||||
5. YCbCr → RGB 색공간 변환
|
||||
6. MCU 블록 조립: 4:4:4, 4:2:2, 4:2:0 크로마 서브샘플링
|
||||
7. Restart marker (DRI/RST) 지원
|
||||
|
||||
### 테스트
|
||||
- 최소 synthetic JPEG 바이트로 roundtrip 검증
|
||||
- Huffman 테이블 구축 단위 테스트
|
||||
- IDCT 정확도 테스트
|
||||
- 서브샘플링별 디코딩 테스트
|
||||
|
||||
---
|
||||
|
||||
## B. glTF/GLB 파서 (`voltex_renderer/src/gltf.rs`)
|
||||
|
||||
### 범위
|
||||
- glTF 2.0 JSON (embedded base64) + .glb 바이너리
|
||||
- 메시 지오메트리 + 기본 PBR 머티리얼
|
||||
- 애니메이션, 스킨, 카메라, 라이트 확장은 미포함
|
||||
|
||||
### API
|
||||
```rust
|
||||
pub fn parse_gltf(data: &[u8]) -> Result<GltfData, String>
|
||||
|
||||
pub struct GltfData {
|
||||
pub meshes: Vec<GltfMesh>,
|
||||
}
|
||||
|
||||
pub struct GltfMesh {
|
||||
pub vertices: Vec<MeshVertex>, // 기존 MeshVertex 재사용
|
||||
pub indices: Vec<u32>,
|
||||
pub name: Option<String>,
|
||||
pub material: Option<GltfMaterial>,
|
||||
}
|
||||
|
||||
pub struct GltfMaterial {
|
||||
pub base_color: [f32; 4],
|
||||
pub metallic: f32,
|
||||
pub roughness: f32,
|
||||
}
|
||||
```
|
||||
|
||||
### 구현 요소
|
||||
1. GLB 헤더 파싱: magic(0x46546C67), version 2, JSON chunk, BIN chunk
|
||||
2. 미니 JSON 파서: 외부 의존성 없음, glTF에 필요한 subset만
|
||||
3. Accessor/BufferView → 정점 데이터 추출
|
||||
- POSITION (vec3), NORMAL (vec3), TEXCOORD_0 (vec2), TANGENT (vec4)
|
||||
- indices (u16/u32)
|
||||
4. 탄젠트 없으면 기존 `compute_tangents()` 재사용
|
||||
5. Material: pbrMetallicRoughness → GltfMaterial 매핑
|
||||
6. Embedded base64 버퍼 디코딩
|
||||
|
||||
### 테스트
|
||||
- 미니 JSON 파서 단위 테스트
|
||||
- GLB 헤더 파싱 테스트
|
||||
- 최소 삼각형 GLB 바이트 수동 생성 roundtrip
|
||||
- Accessor 타입별 (u16/u32, f32) 추출 테스트
|
||||
|
||||
---
|
||||
|
||||
## C. ECS 쿼리 필터 + 시스템 스케줄러
|
||||
|
||||
### C-1. 쿼리 필터 (`voltex_ecs/src/world.rs` 확장)
|
||||
|
||||
#### API
|
||||
```rust
|
||||
// 마커 타입
|
||||
pub struct With<T>(PhantomData<T>);
|
||||
pub struct Without<T>(PhantomData<T>);
|
||||
|
||||
// World 메서드 확장
|
||||
impl World {
|
||||
// 단일 컴포넌트 + 필터
|
||||
pub fn query_with<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)>
|
||||
pub fn query_without<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)>
|
||||
|
||||
// 2-컴포넌트 + 필터
|
||||
pub fn query2_with<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)>
|
||||
pub fn query2_without<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)>
|
||||
}
|
||||
```
|
||||
|
||||
#### 구현
|
||||
- 기존 query 결과에서 `has_component::<W>(entity)` 로 필터링
|
||||
- 기존 query/query2/query3/query4는 그대로 유지 (하위 호환)
|
||||
|
||||
### C-2. 시스템 스케줄러 (`voltex_ecs/src/scheduler.rs` 신규)
|
||||
|
||||
#### API
|
||||
```rust
|
||||
pub trait System {
|
||||
fn run(&mut self, world: &mut World);
|
||||
}
|
||||
|
||||
// fn(&mut World) → impl System 자동 변환
|
||||
impl<F: FnMut(&mut World)> System for F { ... }
|
||||
|
||||
pub struct Scheduler {
|
||||
systems: Vec<Box<dyn System>>,
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub fn new() -> Self
|
||||
pub fn add<S: System + 'static>(&mut self, system: S) -> &mut Self
|
||||
pub fn run_all(&mut self, world: &mut World) // 등록 순서대로 실행
|
||||
}
|
||||
```
|
||||
|
||||
#### 구현
|
||||
- 시스템은 등록 순서대로 순차 실행
|
||||
- `fn(&mut World)` 클로저를 System으로 자동 변환
|
||||
- 의존성 해석이나 병렬 실행 없음 (간단한 순서 기반)
|
||||
|
||||
### 테스트
|
||||
- With/Without 필터 조합 검증
|
||||
- 필터 + query2 조합
|
||||
- 스케줄러 실행 순서 검증
|
||||
- 빈 스케줄러, 단일/다중 시스템
|
||||
156
docs/superpowers/specs/2026-03-25-phase3b-4a-deferred.md
Normal file
156
docs/superpowers/specs/2026-03-25-phase3b-4a-deferred.md
Normal file
@@ -0,0 +1,156 @@
|
||||
# Phase 3b-4a Deferred Items Spec
|
||||
|
||||
## A. 씬 직렬화 (`voltex_ecs`)
|
||||
|
||||
### JSON 직렬화 (`scene.rs` 확장)
|
||||
|
||||
- `serialize_scene_json(world, registry) -> String`
|
||||
- `deserialize_scene_json(world, json, registry) -> Result<Vec<Entity>, String>`
|
||||
- voltex_ecs 내부에 미니 JSON writer + 미니 JSON parser (voltex_renderer 의존 없음)
|
||||
|
||||
포맷:
|
||||
```json
|
||||
{"version":1,"entities":[
|
||||
{"transform":{"position":[0,0,0],"rotation":[0,0,0],"scale":[1,1,1]},
|
||||
"tag":"player","parent":null,"components":{"rigid_body":{...}}}
|
||||
]}
|
||||
```
|
||||
|
||||
### 바이너리 씬 포맷 (`binary_scene.rs` 신규)
|
||||
|
||||
- `serialize_scene_binary(world, registry) -> Vec<u8>`
|
||||
- `deserialize_scene_binary(world, data, registry) -> Result<Vec<Entity>, String>`
|
||||
|
||||
포맷:
|
||||
```
|
||||
[4] magic "VSCN"
|
||||
[4] version u32 LE
|
||||
[4] entity_count u32 LE
|
||||
per entity:
|
||||
[4] component_count u32 LE
|
||||
per component:
|
||||
[2] name_len u16 LE
|
||||
[N] name bytes (UTF-8)
|
||||
[4] data_len u32 LE
|
||||
[N] data bytes
|
||||
```
|
||||
|
||||
### 임의 컴포넌트 등록 (`component_registry.rs` 신규)
|
||||
|
||||
```rust
|
||||
pub type SerializeFn = fn(&World, Entity) -> Option<Vec<u8>>;
|
||||
pub type DeserializeFn = fn(&mut World, Entity, &[u8]) -> Result<(), String>;
|
||||
|
||||
pub struct ComponentRegistry {
|
||||
entries: Vec<ComponentEntry>,
|
||||
}
|
||||
|
||||
struct ComponentEntry {
|
||||
name: String,
|
||||
serialize: SerializeFn,
|
||||
deserialize: DeserializeFn,
|
||||
}
|
||||
|
||||
impl ComponentRegistry {
|
||||
pub fn new() -> Self
|
||||
pub fn register(&mut self, name: &str, ser: SerializeFn, deser: DeserializeFn)
|
||||
pub fn register_defaults(&mut self) // Transform, Parent, Tag
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## B. 비동기 로딩 + 핫 리로드 (`voltex_asset`)
|
||||
|
||||
### 비동기 로딩 (`loader.rs` 신규)
|
||||
|
||||
```rust
|
||||
pub enum LoadState {
|
||||
Loading,
|
||||
Ready,
|
||||
Failed(String),
|
||||
}
|
||||
|
||||
pub struct AssetLoader {
|
||||
sender: Sender<LoadRequest>,
|
||||
receiver: Receiver<LoadResult>,
|
||||
thread: Option<JoinHandle<()>>,
|
||||
pending: HashMap<u64, PendingAsset>,
|
||||
}
|
||||
|
||||
impl AssetLoader {
|
||||
pub fn new() -> Self // 워커 스레드 1개 시작
|
||||
pub fn load<T: Send + 'static>(
|
||||
&mut self, path: PathBuf,
|
||||
parse: fn(&[u8]) -> Result<T, String>,
|
||||
) -> Handle<T> // 즉시 핸들 반환, 백그라운드 로딩
|
||||
pub fn state<T: 'static>(&self, handle: Handle<T>) -> LoadState
|
||||
pub fn process_loaded(&mut self, assets: &mut Assets) // 매 프레임 호출
|
||||
pub fn shutdown(self)
|
||||
}
|
||||
```
|
||||
|
||||
### 핫 리로드 (`watcher.rs` 신규)
|
||||
|
||||
```rust
|
||||
pub struct FileWatcher {
|
||||
watched: HashMap<PathBuf, SystemTime>, // path → last_modified
|
||||
poll_interval: Duration,
|
||||
last_poll: Instant,
|
||||
}
|
||||
|
||||
impl FileWatcher {
|
||||
pub fn new(poll_interval: Duration) -> Self
|
||||
pub fn watch(&mut self, path: PathBuf)
|
||||
pub fn unwatch(&mut self, path: &Path)
|
||||
pub fn poll_changes(&mut self) -> Vec<PathBuf> // std::fs::metadata 기반
|
||||
}
|
||||
```
|
||||
|
||||
- 외부 크레이트 없음, `std::fs::metadata().modified()` 사용
|
||||
- 변경 감지 시 AssetLoader로 재로딩 트리거
|
||||
- 기존 Handle은 유지, AssetStorage에서 in-place swap (generation 유지)
|
||||
|
||||
---
|
||||
|
||||
## C. PBR 텍스처 맵 (`voltex_renderer`)
|
||||
|
||||
### 텍스처 바인딩 확장
|
||||
|
||||
Group 1 확장 (4→8 바인딩):
|
||||
```
|
||||
Binding 0-1: Albedo texture + sampler (기존)
|
||||
Binding 2-3: Normal map texture + sampler (기존)
|
||||
Binding 4-5: Metallic/Roughness/AO (ORM) texture + sampler (신규)
|
||||
Binding 6-7: Emissive texture + sampler (신규)
|
||||
```
|
||||
|
||||
- ORM 텍스처: R=AO, G=Roughness, B=Metallic (glTF ORM 패턴)
|
||||
- 텍스처 없으면 기본 1x1 white 사용
|
||||
|
||||
### 셰이더 변경
|
||||
|
||||
`pbr_shader.wgsl` + `deferred_gbuffer.wgsl`:
|
||||
```wgsl
|
||||
@group(1) @binding(4) var t_orm: texture_2d<f32>;
|
||||
@group(1) @binding(5) var s_orm: sampler;
|
||||
@group(1) @binding(6) var t_emissive: texture_2d<f32>;
|
||||
@group(1) @binding(7) var s_emissive: sampler;
|
||||
|
||||
// Fragment:
|
||||
let orm = textureSample(t_orm, s_orm, in.uv);
|
||||
let ao = orm.r * material.ao;
|
||||
let roughness = orm.g * material.roughness;
|
||||
let metallic = orm.b * material.metallic;
|
||||
let emissive = textureSample(t_emissive, s_emissive, in.uv).rgb;
|
||||
// ... add emissive to final color
|
||||
```
|
||||
|
||||
### MaterialUniform 변경 없음
|
||||
- 기존 metallic/roughness/ao 값은 텍스처 값의 승수(multiplier)로 작동
|
||||
- 텍스처 없을 때 white(1,1,1) × material 값 = 기존과 동일 결과
|
||||
|
||||
### 텍스처 유틸 확장 (`texture.rs`)
|
||||
- `pbr_full_texture_bind_group_layout()` — 8 바인딩 레이아웃
|
||||
- `create_pbr_full_texture_bind_group()` — albedo + normal + ORM + emissive
|
||||
- `black_1x1()` — emissive 기본값 (검정 = 발광 없음)
|
||||
89
docs/superpowers/specs/2026-03-25-phase4b-5-deferred.md
Normal file
89
docs/superpowers/specs/2026-03-25-phase4b-5-deferred.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Phase 4b-5 Deferred Items Spec
|
||||
|
||||
## A. 그림자 확장 + 라이트 컬링 (`voltex_renderer`)
|
||||
|
||||
### CSM (2 캐스케이드)
|
||||
- `CascadedShadowMap` — 2개 depth 텍스처 (near 0~20m, far 20~100m), 각 2048x2048
|
||||
- 카메라 프러스텀 분할, 캐스케이드별 light VP 행렬 계산
|
||||
- `ShadowUniform` 확장: `light_view_proj` 2개 + `cascade_split` f32
|
||||
- 셰이더: fragment depth로 캐스케이드 선택, 해당 맵에서 PCF 샘플링
|
||||
|
||||
### Point Light Shadow (큐브맵)
|
||||
- `PointShadowMap` — 6면 depth 큐브맵 (512x512 per face)
|
||||
- 6방향 view matrix (±X, ±Y, ±Z), 90° perspective projection
|
||||
- 셰이더: light→fragment 방향으로 큐브맵 샘플링
|
||||
- MAX 2개 포인트 라이트 그림자 (성능상 제한)
|
||||
|
||||
### Spot Light Shadow
|
||||
- 기존 ShadowMap 패턴 재사용 (perspective projection)
|
||||
- spot cone outer angle → FOV 변환
|
||||
- MAX 2개 스팟 라이트 그림자
|
||||
|
||||
### CPU 프러스텀 라이트 컬링
|
||||
```rust
|
||||
pub struct Frustum { planes: [Plane; 6] }
|
||||
pub fn extract_frustum(view_proj: &Mat4) -> Frustum
|
||||
pub fn cull_lights(frustum: &Frustum, lights: &[LightData]) -> Vec<usize>
|
||||
```
|
||||
- Sphere vs 6 plane 테스트
|
||||
- Directional: 항상 포함, Point: position+range 구, Spot: cone 바운딩 구
|
||||
|
||||
---
|
||||
|
||||
## B. IBL 개선 + GPU BRDF LUT (`voltex_renderer`)
|
||||
|
||||
### 프로시저럴 스카이 개선
|
||||
- `sample_environment` → Hosek-Wilkie 근사 sky model
|
||||
- `SkyParams` uniform: sun_direction, turbidity
|
||||
- 태양 디스크 렌더링 (pow falloff)
|
||||
- 기존 하드코딩 gradient → 물리 기반 산란
|
||||
|
||||
### SH Irradiance
|
||||
- `compute_sh_coefficients(sun_dir, turbidity) -> [Vec3; 9]` — L2 SH 9계수
|
||||
- CPU에서 환경맵 반구 적분 → SH 계수
|
||||
- 셰이더: `evaluate_sh(normal, coeffs)` — O(1) irradiance 조회
|
||||
- 기존 `sample_environment(N, 1.0)` 대체
|
||||
|
||||
### GPU 컴퓨트 BRDF LUT
|
||||
- `brdf_lut_compute.wgsl` — 컴퓨트 셰이더
|
||||
- Workgroup 16x16, 1024 importance samples per texel
|
||||
- 출력: `Rg16Float` (기존 Rgba8Unorm → 정밀도 향상)
|
||||
- `IblResources::new_gpu(device, queue)` — 컴퓨트 패스로 생성
|
||||
|
||||
---
|
||||
|
||||
## C. 물리 개선 (`voltex_physics`)
|
||||
|
||||
### 각속도/회전 물리
|
||||
- `integrator.rs`: angular_velocity 적분 → rotation 업데이트
|
||||
- 관성 텐서 근사: `inertia_tensor(collider, mass) -> Vec3` (대각 성분만)
|
||||
- Sphere: 2/5 mr², Box: 1/12 m(h²+d²), Capsule: 근사
|
||||
- `solver.rs`: 충돌 토크 `τ = r × impulse` → angular impulse / inertia
|
||||
|
||||
### Sequential Impulse 솔버
|
||||
- `PhysicsConfig.solver_iterations: u32` (기본 4)
|
||||
- 접촉점별 반복: 매 iteration마다 상대속도 재계산 후 추가 임펄스
|
||||
- 기존 단일 반복 코드를 N회 루프로 래핑
|
||||
|
||||
### Sleep/Island 시스템
|
||||
- `RigidBody` 확장: `is_sleeping: bool`, `sleep_timer: f32`
|
||||
- `SLEEP_VELOCITY_THRESHOLD: f32 = 0.01`
|
||||
- `SLEEP_TIME_THRESHOLD: f32 = 0.5` (0.5초 정지 시 sleep)
|
||||
- sleeping 바디: integrate/collision 스킵
|
||||
- 충돌 시 wake_up
|
||||
|
||||
### Ray vs Triangle/Mesh
|
||||
- `ray_vs_triangle(ray, v0, v1, v2) -> Option<(f32, Vec3)>` — Möller–Trumbore
|
||||
- `ray_vs_mesh(ray, vertices, indices) -> Option<(f32, Vec3)>` — 삼각형 순회
|
||||
|
||||
### raycast_all
|
||||
- `raycast_all(world, ray, max_dist) -> Vec<RayHit>` — 모든 hit, 거리순 정렬
|
||||
|
||||
### BVH 개선
|
||||
- `query_pairs` → 재귀 트리 순회 (현재 N² brute force 제거)
|
||||
- raycast: front-to-back 순회, t_min > current_best 스킵
|
||||
- `refit()` — 리프 AABB 갱신 후 부모 전파 (incremental update)
|
||||
|
||||
### CCD
|
||||
- `swept_sphere_vs_aabb(start, end, radius, aabb) -> Option<f32>` — 이동 구 vs AABB
|
||||
- physics_step: `|velocity| * dt > radius` 인 물체만 CCD 적용
|
||||
@@ -16,7 +16,7 @@ use voltex_renderer::{
|
||||
gbuffer_camera_bind_group_layout,
|
||||
lighting_gbuffer_bind_group_layout, lighting_lights_bind_group_layout,
|
||||
lighting_shadow_bind_group_layout,
|
||||
pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
|
||||
pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group,
|
||||
SsgiResources, SsgiUniform,
|
||||
ssgi_gbuffer_bind_group_layout, ssgi_data_bind_group_layout, create_ssgi_pipeline,
|
||||
RtAccel, RtInstance, BlasMeshData, RtShadowResources, RtShadowUniform,
|
||||
@@ -119,6 +119,8 @@ struct AppState {
|
||||
// Keep textures alive
|
||||
_albedo_tex: GpuTexture,
|
||||
_normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
|
||||
_orm_tex: GpuTexture,
|
||||
_emissive_tex: GpuTexture,
|
||||
_shadow_map: ShadowMap,
|
||||
_ibl: IblResources,
|
||||
_shadow_uniform_buffer: wgpu::Buffer,
|
||||
@@ -171,7 +173,7 @@ impl ApplicationHandler for DeferredDemoApp {
|
||||
// G-Buffer pass bind group layouts
|
||||
// ---------------------------------------------------------------
|
||||
let gbuf_cam_layout = gbuffer_camera_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_full_texture_bind_group_layout(&gpu.device);
|
||||
let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
|
||||
|
||||
// Camera dynamic uniform buffer (one CameraUniform per sphere)
|
||||
@@ -204,17 +206,23 @@ impl ApplicationHandler for DeferredDemoApp {
|
||||
}],
|
||||
});
|
||||
|
||||
// PBR textures: white albedo + flat normal
|
||||
// PBR textures: white albedo + flat normal + ORM + emissive
|
||||
let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
|
||||
let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
|
||||
let pbr_texture_bind_group = create_pbr_texture_bind_group(
|
||||
let orm_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let emissive_tex = GpuTexture::black_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let pbr_texture_bind_group = create_pbr_full_texture_bind_group(
|
||||
&gpu.device,
|
||||
&pbr_tex_layout,
|
||||
&albedo_tex.view,
|
||||
&albedo_tex.sampler,
|
||||
&normal_tex.1,
|
||||
&normal_tex.2,
|
||||
&orm_tex.view,
|
||||
&orm_tex.sampler,
|
||||
&emissive_tex.view,
|
||||
&emissive_tex.sampler,
|
||||
);
|
||||
|
||||
// Material bind group (dynamic offset, group 2)
|
||||
@@ -409,6 +417,9 @@ impl ApplicationHandler for DeferredDemoApp {
|
||||
shadow_map_size: 0.0,
|
||||
shadow_bias: 0.0,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Shadow Uniform Buffer"),
|
||||
@@ -581,6 +592,8 @@ impl ApplicationHandler for DeferredDemoApp {
|
||||
shadow_layout,
|
||||
_albedo_tex: albedo_tex,
|
||||
_normal_tex: normal_tex,
|
||||
_orm_tex: orm_tex,
|
||||
_emissive_tex: emissive_tex,
|
||||
_shadow_map: shadow_map,
|
||||
_ibl: ibl,
|
||||
_shadow_uniform_buffer: shadow_uniform_buffer,
|
||||
|
||||
@@ -11,7 +11,7 @@ use voltex_renderer::{
|
||||
GpuContext, Camera, FpsController, CameraUniform, LightsUniform, LightData,
|
||||
Mesh, GpuTexture, MaterialUniform, generate_sphere, create_pbr_pipeline,
|
||||
ShadowMap, ShadowUniform,
|
||||
IblResources, pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
|
||||
IblResources, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group,
|
||||
};
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
@@ -36,6 +36,8 @@ struct AppState {
|
||||
camera_light_bind_group: wgpu::BindGroup,
|
||||
_albedo_tex: GpuTexture,
|
||||
_normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
|
||||
_orm_tex: GpuTexture,
|
||||
_emissive_tex: GpuTexture,
|
||||
pbr_texture_bind_group: wgpu::BindGroup,
|
||||
material_bind_group: wgpu::BindGroup,
|
||||
shadow_bind_group: wgpu::BindGroup,
|
||||
@@ -138,7 +140,7 @@ impl ApplicationHandler for IblDemoApp {
|
||||
|
||||
// Bind group layouts
|
||||
let cl_layout = camera_light_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_full_texture_bind_group_layout(&gpu.device);
|
||||
let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
|
||||
|
||||
// Camera+Light bind group
|
||||
@@ -163,17 +165,23 @@ impl ApplicationHandler for IblDemoApp {
|
||||
],
|
||||
});
|
||||
|
||||
// PBR texture bind group (albedo + normal)
|
||||
// PBR texture bind group (albedo + normal + ORM + emissive)
|
||||
let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
|
||||
let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
|
||||
let pbr_texture_bind_group = create_pbr_texture_bind_group(
|
||||
let orm_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let emissive_tex = GpuTexture::black_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let pbr_texture_bind_group = create_pbr_full_texture_bind_group(
|
||||
&gpu.device,
|
||||
&pbr_tex_layout,
|
||||
&albedo_tex.view,
|
||||
&albedo_tex.sampler,
|
||||
&normal_tex.1,
|
||||
&normal_tex.2,
|
||||
&orm_tex.view,
|
||||
&orm_tex.sampler,
|
||||
&emissive_tex.view,
|
||||
&emissive_tex.sampler,
|
||||
);
|
||||
|
||||
// IBL resources
|
||||
@@ -203,6 +211,9 @@ impl ApplicationHandler for IblDemoApp {
|
||||
shadow_map_size: 0.0,
|
||||
shadow_bias: 0.0,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Shadow Uniform Buffer"),
|
||||
@@ -240,6 +251,8 @@ impl ApplicationHandler for IblDemoApp {
|
||||
camera_light_bind_group,
|
||||
_albedo_tex: albedo_tex,
|
||||
_normal_tex: normal_tex,
|
||||
_orm_tex: orm_tex,
|
||||
_emissive_tex: emissive_tex,
|
||||
pbr_texture_bind_group,
|
||||
material_bind_group,
|
||||
shadow_bind_group,
|
||||
|
||||
@@ -11,7 +11,7 @@ use voltex_renderer::{
|
||||
GpuContext, Camera, FpsController, CameraUniform, LightsUniform, LightData,
|
||||
Mesh, GpuTexture, MaterialUniform, generate_sphere, create_pbr_pipeline, obj,
|
||||
ShadowMap, ShadowUniform,
|
||||
IblResources, pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
|
||||
IblResources, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group,
|
||||
};
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
@@ -35,6 +35,8 @@ struct AppState {
|
||||
camera_light_bind_group: wgpu::BindGroup,
|
||||
_albedo_tex: GpuTexture,
|
||||
_normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
|
||||
_orm_tex: GpuTexture,
|
||||
_emissive_tex: GpuTexture,
|
||||
pbr_texture_bind_group: wgpu::BindGroup,
|
||||
material_bind_group: wgpu::BindGroup,
|
||||
shadow_bind_group: wgpu::BindGroup,
|
||||
@@ -149,7 +151,7 @@ impl ApplicationHandler for MultiLightApp {
|
||||
|
||||
// Bind group layouts
|
||||
let cl_layout = camera_light_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_full_texture_bind_group_layout(&gpu.device);
|
||||
let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
|
||||
|
||||
// Camera+Light bind group
|
||||
@@ -174,17 +176,23 @@ impl ApplicationHandler for MultiLightApp {
|
||||
],
|
||||
});
|
||||
|
||||
// PBR texture bind group (albedo + normal)
|
||||
// PBR texture bind group (albedo + normal + ORM + emissive)
|
||||
let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
|
||||
let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
|
||||
let pbr_texture_bind_group = create_pbr_texture_bind_group(
|
||||
let orm_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let emissive_tex = GpuTexture::black_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let pbr_texture_bind_group = create_pbr_full_texture_bind_group(
|
||||
&gpu.device,
|
||||
&pbr_tex_layout,
|
||||
&albedo_tex.view,
|
||||
&albedo_tex.sampler,
|
||||
&normal_tex.1,
|
||||
&normal_tex.2,
|
||||
&orm_tex.view,
|
||||
&orm_tex.sampler,
|
||||
&emissive_tex.view,
|
||||
&emissive_tex.sampler,
|
||||
);
|
||||
|
||||
// IBL resources
|
||||
@@ -214,6 +222,9 @@ impl ApplicationHandler for MultiLightApp {
|
||||
shadow_map_size: 0.0,
|
||||
shadow_bias: 0.0,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Shadow Uniform Buffer"),
|
||||
@@ -252,6 +263,8 @@ impl ApplicationHandler for MultiLightApp {
|
||||
camera_light_bind_group,
|
||||
_albedo_tex: albedo_tex,
|
||||
_normal_tex: normal_tex,
|
||||
_orm_tex: orm_tex,
|
||||
_emissive_tex: emissive_tex,
|
||||
pbr_texture_bind_group,
|
||||
material_bind_group,
|
||||
shadow_bind_group,
|
||||
|
||||
@@ -11,7 +11,7 @@ use voltex_renderer::{
|
||||
GpuContext, Camera, FpsController, CameraUniform, LightsUniform, LightData,
|
||||
Mesh, GpuTexture, MaterialUniform, generate_sphere, create_pbr_pipeline,
|
||||
ShadowMap, ShadowUniform,
|
||||
IblResources, pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
|
||||
IblResources, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group,
|
||||
};
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
@@ -36,6 +36,8 @@ struct AppState {
|
||||
camera_light_bind_group: wgpu::BindGroup,
|
||||
_albedo_tex: GpuTexture,
|
||||
_normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
|
||||
_orm_tex: GpuTexture,
|
||||
_emissive_tex: GpuTexture,
|
||||
pbr_texture_bind_group: wgpu::BindGroup,
|
||||
material_bind_group: wgpu::BindGroup,
|
||||
shadow_bind_group: wgpu::BindGroup,
|
||||
@@ -134,7 +136,7 @@ impl ApplicationHandler for PbrDemoApp {
|
||||
|
||||
// Bind group layouts
|
||||
let cl_layout = camera_light_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_full_texture_bind_group_layout(&gpu.device);
|
||||
let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
|
||||
|
||||
// Camera+Light bind group
|
||||
@@ -159,17 +161,23 @@ impl ApplicationHandler for PbrDemoApp {
|
||||
],
|
||||
});
|
||||
|
||||
// PBR texture bind group (albedo + normal)
|
||||
// PBR texture bind group (albedo + normal + ORM + emissive)
|
||||
let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
|
||||
let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
|
||||
let pbr_texture_bind_group = create_pbr_texture_bind_group(
|
||||
let orm_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let emissive_tex = GpuTexture::black_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let pbr_texture_bind_group = create_pbr_full_texture_bind_group(
|
||||
&gpu.device,
|
||||
&pbr_tex_layout,
|
||||
&albedo_tex.view,
|
||||
&albedo_tex.sampler,
|
||||
&normal_tex.1,
|
||||
&normal_tex.2,
|
||||
&orm_tex.view,
|
||||
&orm_tex.sampler,
|
||||
&emissive_tex.view,
|
||||
&emissive_tex.sampler,
|
||||
);
|
||||
|
||||
// IBL resources
|
||||
@@ -199,6 +207,9 @@ impl ApplicationHandler for PbrDemoApp {
|
||||
shadow_map_size: 0.0,
|
||||
shadow_bias: 0.0,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Shadow Uniform Buffer"),
|
||||
@@ -236,6 +247,8 @@ impl ApplicationHandler for PbrDemoApp {
|
||||
camera_light_bind_group,
|
||||
_albedo_tex: albedo_tex,
|
||||
_normal_tex: normal_tex,
|
||||
_orm_tex: orm_tex,
|
||||
_emissive_tex: emissive_tex,
|
||||
pbr_texture_bind_group,
|
||||
material_bind_group,
|
||||
shadow_bind_group,
|
||||
|
||||
@@ -12,7 +12,7 @@ use voltex_renderer::{
|
||||
Mesh, GpuTexture, MaterialUniform, generate_sphere, create_pbr_pipeline, obj,
|
||||
ShadowMap, ShadowUniform, ShadowPassUniform, SHADOW_MAP_SIZE,
|
||||
create_shadow_pipeline, shadow_pass_bind_group_layout,
|
||||
IblResources, pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
|
||||
IblResources, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group,
|
||||
};
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
@@ -39,6 +39,8 @@ struct AppState {
|
||||
camera_light_bind_group: wgpu::BindGroup,
|
||||
_albedo_tex: GpuTexture,
|
||||
_normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
|
||||
_orm_tex: GpuTexture,
|
||||
_emissive_tex: GpuTexture,
|
||||
pbr_texture_bind_group: wgpu::BindGroup,
|
||||
material_bind_group: wgpu::BindGroup,
|
||||
// Shadow resources
|
||||
@@ -188,7 +190,7 @@ impl ApplicationHandler for ShadowDemoApp {
|
||||
|
||||
// Bind group layouts
|
||||
let cl_layout = camera_light_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
|
||||
let pbr_tex_layout = pbr_full_texture_bind_group_layout(&gpu.device);
|
||||
let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
|
||||
|
||||
// Camera+Light bind group
|
||||
@@ -211,17 +213,23 @@ impl ApplicationHandler for ShadowDemoApp {
|
||||
],
|
||||
});
|
||||
|
||||
// PBR texture bind group (albedo + normal)
|
||||
// PBR texture bind group (albedo + normal + ORM + emissive)
|
||||
let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
|
||||
let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
|
||||
let pbr_texture_bind_group = create_pbr_texture_bind_group(
|
||||
let orm_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let emissive_tex = GpuTexture::black_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
|
||||
let pbr_texture_bind_group = create_pbr_full_texture_bind_group(
|
||||
&gpu.device,
|
||||
&pbr_tex_layout,
|
||||
&albedo_tex.view,
|
||||
&albedo_tex.sampler,
|
||||
&normal_tex.1,
|
||||
&normal_tex.2,
|
||||
&orm_tex.view,
|
||||
&orm_tex.sampler,
|
||||
&emissive_tex.view,
|
||||
&emissive_tex.sampler,
|
||||
);
|
||||
|
||||
// IBL resources
|
||||
@@ -250,6 +258,9 @@ impl ApplicationHandler for ShadowDemoApp {
|
||||
shadow_map_size: SHADOW_MAP_SIZE as f32,
|
||||
shadow_bias: 0.005,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Shadow Uniform Buffer"),
|
||||
@@ -311,6 +322,8 @@ impl ApplicationHandler for ShadowDemoApp {
|
||||
camera_light_bind_group,
|
||||
_albedo_tex: albedo_tex,
|
||||
_normal_tex: normal_tex,
|
||||
_orm_tex: orm_tex,
|
||||
_emissive_tex: emissive_tex,
|
||||
pbr_texture_bind_group,
|
||||
material_bind_group,
|
||||
shadow_map,
|
||||
@@ -473,6 +486,9 @@ impl ApplicationHandler for ShadowDemoApp {
|
||||
shadow_map_size: SHADOW_MAP_SIZE as f32,
|
||||
shadow_bias: 0.005,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
state.gpu.queue.write_buffer(
|
||||
&state.shadow_uniform_buffer,
|
||||
|
||||
@@ -267,6 +267,9 @@ impl ApplicationHandler for SurvivorApp {
|
||||
shadow_map_size: SHADOW_MAP_SIZE as f32,
|
||||
shadow_bias: 0.005,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
let shadow_uniform_buffer =
|
||||
gpu.device
|
||||
@@ -595,6 +598,9 @@ impl ApplicationHandler for SurvivorApp {
|
||||
shadow_map_size: SHADOW_MAP_SIZE as f32,
|
||||
shadow_bias: 0.005,
|
||||
_padding: [0.0; 2],
|
||||
sun_direction: [0.5, -0.7, 0.5],
|
||||
turbidity: 3.0,
|
||||
sh_coefficients: [[0.0; 4]; 7],
|
||||
};
|
||||
state.gpu.queue.write_buffer(
|
||||
&state.shadow_uniform_buffer,
|
||||
|
||||
Reference in New Issue
Block a user