Compare commits
80 Commits
622c91a954
...
7375b15fcf
| Author | SHA1 | Date | |
|---|---|---|---|
| 7375b15fcf | |||
| 37addfdf03 | |||
| 039dbe0d09 | |||
| 9a5d3b7b97 | |||
| cc842d7c13 | |||
| 84dc7aeb20 | |||
| d7b5fdde31 | |||
| 96efe113b2 | |||
| 98d40d6520 | |||
| 6beafc6949 | |||
| 28b24226e7 | |||
| 8685d7c4aa | |||
| 447473598a | |||
| 831365a622 | |||
| 0f08c65a1e | |||
| 1b5da4d0d5 | |||
| 07d7475410 | |||
| d4ef4cf1ce | |||
| 764ee96ec1 | |||
| 41c7f9607e | |||
| d321c0695c | |||
| 1f855b7bf6 | |||
| 1ea2d340e6 | |||
| a60a25e9ba | |||
| e4879b6a31 | |||
| 17ea3f4856 | |||
| c5f6511fc2 | |||
| 2703e73ef0 | |||
| 7dbd94ebab | |||
| 72b517efb2 | |||
| 5f21ec0afe | |||
| bbb11d9d47 | |||
| c6ac2ded81 | |||
| f79889cbf1 | |||
| afb95c9fb1 | |||
| ef8c39b5ae | |||
| 43411b6fd9 | |||
| 5b3b06c318 | |||
| 6121530bfe | |||
| ce1a79cab6 | |||
| 1d38434aec | |||
| 0cc6df15a3 | |||
| ce69c81eca | |||
| b2fd3988f5 | |||
| cccc54c438 | |||
| 58bce839fe | |||
| 94e7f6262e | |||
| 3b0a65ed17 | |||
| e008178316 | |||
| 74974dbff0 | |||
| 53505ee9b7 | |||
| 295944d237 | |||
| 587c75f6c2 | |||
| 0857446d74 | |||
| b65585b739 | |||
| 4ef6e83710 | |||
| 58cbdf8400 | |||
| b965d78835 | |||
| 65b86c293c | |||
| 7fbc88b86f | |||
| 8ecf883ef6 | |||
| e14a53c3fa | |||
| 4d5fc5e44c | |||
| 34e257e887 | |||
| 14fe532432 | |||
| 9f42300109 | |||
| 6ef248f76d | |||
| ae20590f6e | |||
| d93253dfb1 | |||
| fed47e9242 | |||
| 8468f3cce2 | |||
| c19dc6421a | |||
| f7ef228b49 | |||
| 1571aa5f97 | |||
| 36fedb48bf | |||
| a642f8ef7e | |||
| 14784c731c | |||
| a69554eede | |||
| 867f8a0aa0 | |||
| eb454800a9 |
6
Cargo.lock
generated
6
Cargo.lock
generated
@@ -491,7 +491,9 @@ dependencies = [
|
||||
"env_logger",
|
||||
"log",
|
||||
"pollster",
|
||||
"voltex_ecs",
|
||||
"voltex_editor",
|
||||
"voltex_math",
|
||||
"voltex_platform",
|
||||
"voltex_renderer",
|
||||
"wgpu",
|
||||
@@ -2080,6 +2082,7 @@ checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
|
||||
name = "voltex_ai"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"voltex_ecs",
|
||||
"voltex_math",
|
||||
]
|
||||
|
||||
@@ -2106,6 +2109,9 @@ name = "voltex_editor"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"voltex_ecs",
|
||||
"voltex_math",
|
||||
"voltex_renderer",
|
||||
"wgpu",
|
||||
]
|
||||
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
pub mod nav_agent;
|
||||
pub mod navmesh;
|
||||
pub mod pathfinding;
|
||||
pub mod steering;
|
||||
|
||||
pub use nav_agent::NavAgent;
|
||||
pub use navmesh::{NavMesh, NavTriangle};
|
||||
pub use pathfinding::find_path;
|
||||
pub use steering::{SteeringAgent, seek, flee, arrive, wander, follow_path};
|
||||
|
||||
85
crates/voltex_ai/src/nav_agent.rs
Normal file
85
crates/voltex_ai/src/nav_agent.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
use voltex_math::Vec3;
|
||||
|
||||
/// ECS component for AI pathfinding navigation.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NavAgent {
|
||||
pub target: Option<Vec3>,
|
||||
pub speed: f32,
|
||||
pub path: Vec<Vec3>,
|
||||
pub current_waypoint: usize,
|
||||
pub reached: bool,
|
||||
}
|
||||
|
||||
impl NavAgent {
|
||||
pub fn new(speed: f32) -> Self {
|
||||
NavAgent {
|
||||
target: None,
|
||||
speed,
|
||||
path: Vec::new(),
|
||||
current_waypoint: 0,
|
||||
reached: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_target(&mut self, target: Vec3) {
|
||||
self.target = Some(target);
|
||||
self.path.clear();
|
||||
self.current_waypoint = 0;
|
||||
self.reached = false;
|
||||
}
|
||||
|
||||
pub fn clear_target(&mut self) {
|
||||
self.target = None;
|
||||
self.path.clear();
|
||||
self.current_waypoint = 0;
|
||||
self.reached = false;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_new_defaults() {
|
||||
let agent = NavAgent::new(5.0);
|
||||
assert!((agent.speed - 5.0).abs() < 1e-6);
|
||||
assert!(agent.target.is_none());
|
||||
assert!(agent.path.is_empty());
|
||||
assert_eq!(agent.current_waypoint, 0);
|
||||
assert!(!agent.reached);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_target() {
|
||||
let mut agent = NavAgent::new(3.0);
|
||||
agent.set_target(Vec3::new(10.0, 0.0, 5.0));
|
||||
assert!(agent.target.is_some());
|
||||
let t = agent.target.unwrap();
|
||||
assert!((t.x - 10.0).abs() < 1e-6);
|
||||
assert!(!agent.reached);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_target_clears_path() {
|
||||
let mut agent = NavAgent::new(3.0);
|
||||
agent.path = vec![Vec3::new(1.0, 0.0, 0.0), Vec3::new(2.0, 0.0, 0.0)];
|
||||
agent.current_waypoint = 1;
|
||||
agent.set_target(Vec3::new(5.0, 0.0, 5.0));
|
||||
assert!(agent.path.is_empty());
|
||||
assert_eq!(agent.current_waypoint, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_target() {
|
||||
let mut agent = NavAgent::new(3.0);
|
||||
agent.set_target(Vec3::new(10.0, 0.0, 5.0));
|
||||
agent.path = vec![Vec3::new(1.0, 0.0, 0.0)];
|
||||
agent.reached = true;
|
||||
agent.clear_target();
|
||||
assert!(agent.target.is_none());
|
||||
assert!(agent.path.is_empty());
|
||||
assert_eq!(agent.current_waypoint, 0);
|
||||
assert!(!agent.reached);
|
||||
}
|
||||
}
|
||||
@@ -68,6 +68,128 @@ impl NavMesh {
|
||||
)
|
||||
}
|
||||
|
||||
/// Serialize the NavMesh to a binary format with header.
|
||||
///
|
||||
/// Format:
|
||||
/// - Magic: "VNAV" (4 bytes)
|
||||
/// - Version: u32 (little-endian)
|
||||
/// - num_vertices: u32
|
||||
/// - vertices: [f32; 3] * num_vertices
|
||||
/// - num_triangles: u32
|
||||
/// - triangles: for each triangle: indices [u32; 3] + neighbors [i32; 3] (-1 for None)
|
||||
pub fn serialize(&self) -> Vec<u8> {
|
||||
let mut data = Vec::new();
|
||||
// Magic
|
||||
data.extend_from_slice(b"VNAV");
|
||||
// Version
|
||||
data.extend_from_slice(&1u32.to_le_bytes());
|
||||
|
||||
// Vertices
|
||||
data.extend_from_slice(&(self.vertices.len() as u32).to_le_bytes());
|
||||
for v in &self.vertices {
|
||||
data.extend_from_slice(&v.x.to_le_bytes());
|
||||
data.extend_from_slice(&v.y.to_le_bytes());
|
||||
data.extend_from_slice(&v.z.to_le_bytes());
|
||||
}
|
||||
|
||||
// Triangles
|
||||
data.extend_from_slice(&(self.triangles.len() as u32).to_le_bytes());
|
||||
for tri in &self.triangles {
|
||||
for &idx in &tri.indices {
|
||||
data.extend_from_slice(&(idx as u32).to_le_bytes());
|
||||
}
|
||||
for &nb in &tri.neighbors {
|
||||
let val: i32 = match nb {
|
||||
Some(i) => i as i32,
|
||||
None => -1,
|
||||
};
|
||||
data.extend_from_slice(&val.to_le_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
data
|
||||
}
|
||||
|
||||
/// Deserialize a NavMesh from binary data with header validation.
|
||||
pub fn deserialize(data: &[u8]) -> Result<Self, String> {
|
||||
if data.len() < 8 {
|
||||
return Err("data too short for header".to_string());
|
||||
}
|
||||
|
||||
// Validate magic
|
||||
if &data[0..4] != b"VNAV" {
|
||||
return Err(format!(
|
||||
"invalid magic: expected VNAV, got {:?}",
|
||||
&data[0..4]
|
||||
));
|
||||
}
|
||||
|
||||
// Read version
|
||||
let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
|
||||
if version != 1 {
|
||||
return Err(format!("unsupported version: {}", version));
|
||||
}
|
||||
|
||||
// Delegate rest to offset-based reading
|
||||
let mut offset = 8usize;
|
||||
|
||||
let read_u32 = |off: &mut usize| -> Result<u32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = u32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let read_i32 = |off: &mut usize| -> Result<i32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = i32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let read_f32 = |off: &mut usize| -> Result<f32, String> {
|
||||
if *off + 4 > data.len() {
|
||||
return Err("unexpected end of data".to_string());
|
||||
}
|
||||
let val = f32::from_le_bytes([data[*off], data[*off + 1], data[*off + 2], data[*off + 3]]);
|
||||
*off += 4;
|
||||
Ok(val)
|
||||
};
|
||||
|
||||
let vc = read_u32(&mut offset)? as usize;
|
||||
let mut vertices = Vec::with_capacity(vc);
|
||||
for _ in 0..vc {
|
||||
let x = read_f32(&mut offset)?;
|
||||
let y = read_f32(&mut offset)?;
|
||||
let z = read_f32(&mut offset)?;
|
||||
vertices.push(Vec3::new(x, y, z));
|
||||
}
|
||||
|
||||
let tc = read_u32(&mut offset)? as usize;
|
||||
let mut triangles = Vec::with_capacity(tc);
|
||||
for _ in 0..tc {
|
||||
let i0 = read_u32(&mut offset)? as usize;
|
||||
let i1 = read_u32(&mut offset)? as usize;
|
||||
let i2 = read_u32(&mut offset)? as usize;
|
||||
let n0 = read_i32(&mut offset)?;
|
||||
let n1 = read_i32(&mut offset)?;
|
||||
let n2 = read_i32(&mut offset)?;
|
||||
let to_opt = |v: i32| -> Option<usize> {
|
||||
if v < 0 { None } else { Some(v as usize) }
|
||||
};
|
||||
triangles.push(NavTriangle {
|
||||
indices: [i0, i1, i2],
|
||||
neighbors: [to_opt(n0), to_opt(n1), to_opt(n2)],
|
||||
});
|
||||
}
|
||||
|
||||
Ok(NavMesh::new(vertices, triangles))
|
||||
}
|
||||
|
||||
/// Return the shared edge (portal) vertices between two adjacent triangles.
|
||||
/// Returns (left, right) vertices of the portal edge as seen from `from_tri` looking toward `to_tri`.
|
||||
/// Returns None if the triangles are not adjacent.
|
||||
@@ -312,4 +434,64 @@ mod tests {
|
||||
let result = deserialize_navmesh(&[]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_method_serialize_deserialize_roundtrip() {
|
||||
let nm = make_simple_navmesh();
|
||||
let data = nm.serialize();
|
||||
let restored = NavMesh::deserialize(&data).expect("should deserialize");
|
||||
assert_eq!(nm.vertices.len(), restored.vertices.len());
|
||||
assert_eq!(nm.triangles.len(), restored.triangles.len());
|
||||
for (a, b) in nm.vertices.iter().zip(restored.vertices.iter()) {
|
||||
assert!((a.x - b.x).abs() < 1e-6);
|
||||
assert!((a.y - b.y).abs() < 1e-6);
|
||||
assert!((a.z - b.z).abs() < 1e-6);
|
||||
}
|
||||
for (a, b) in nm.triangles.iter().zip(restored.triangles.iter()) {
|
||||
assert_eq!(a.indices, b.indices);
|
||||
assert_eq!(a.neighbors, b.neighbors);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_invalid_magic() {
|
||||
let data = b"XXXX\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
|
||||
assert!(NavMesh::deserialize(data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_too_short() {
|
||||
let data = b"VNA";
|
||||
assert!(NavMesh::deserialize(data).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_empty_navmesh() {
|
||||
let nm = NavMesh::new(vec![], vec![]);
|
||||
let data = nm.serialize();
|
||||
let restored = NavMesh::deserialize(&data).expect("should deserialize empty");
|
||||
assert!(restored.vertices.is_empty());
|
||||
assert!(restored.triangles.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_header_format() {
|
||||
let nm = NavMesh::new(vec![], vec![]);
|
||||
let data = nm.serialize();
|
||||
// Check magic
|
||||
assert_eq!(&data[0..4], b"VNAV");
|
||||
// Check version = 1
|
||||
let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
|
||||
assert_eq!(version, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_unsupported_version() {
|
||||
let mut data = Vec::new();
|
||||
data.extend_from_slice(b"VNAV");
|
||||
data.extend_from_slice(&99u32.to_le_bytes());
|
||||
data.extend_from_slice(&0u32.to_le_bytes());
|
||||
data.extend_from_slice(&0u32.to_le_bytes());
|
||||
assert!(NavMesh::deserialize(&data).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
78
crates/voltex_audio/src/audio_source.rs
Normal file
78
crates/voltex_audio/src/audio_source.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
/// ECS component that attaches audio playback to an entity.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AudioSource {
|
||||
pub clip_id: u32,
|
||||
pub volume: f32,
|
||||
pub spatial: bool,
|
||||
pub looping: bool,
|
||||
pub playing: bool,
|
||||
pub played_once: bool,
|
||||
}
|
||||
|
||||
impl AudioSource {
|
||||
pub fn new(clip_id: u32) -> Self {
|
||||
AudioSource {
|
||||
clip_id,
|
||||
volume: 1.0,
|
||||
spatial: false,
|
||||
looping: false,
|
||||
playing: false,
|
||||
played_once: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn spatial(mut self) -> Self { self.spatial = true; self }
|
||||
pub fn looping(mut self) -> Self { self.looping = true; self }
|
||||
|
||||
pub fn play(&mut self) {
|
||||
self.playing = true;
|
||||
self.played_once = false;
|
||||
}
|
||||
|
||||
pub fn stop(&mut self) {
|
||||
self.playing = false;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_new_defaults() {
|
||||
let src = AudioSource::new(42);
|
||||
assert_eq!(src.clip_id, 42);
|
||||
assert!((src.volume - 1.0).abs() < 1e-6);
|
||||
assert!(!src.spatial);
|
||||
assert!(!src.looping);
|
||||
assert!(!src.playing);
|
||||
assert!(!src.played_once);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_builder_pattern() {
|
||||
let src = AudioSource::new(1).spatial().looping();
|
||||
assert!(src.spatial);
|
||||
assert!(src.looping);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_play_stop() {
|
||||
let mut src = AudioSource::new(1);
|
||||
src.play();
|
||||
assert!(src.playing);
|
||||
assert!(!src.played_once);
|
||||
src.played_once = true;
|
||||
src.play(); // re-play should reset played_once
|
||||
assert!(!src.played_once);
|
||||
src.stop();
|
||||
assert!(!src.playing);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_volume() {
|
||||
let mut src = AudioSource::new(1);
|
||||
src.volume = 0.5;
|
||||
assert!((src.volume - 0.5).abs() < 1e-6);
|
||||
}
|
||||
}
|
||||
126
crates/voltex_audio/src/hrtf.rs
Normal file
126
crates/voltex_audio/src/hrtf.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
use std::f32::consts::PI;
|
||||
|
||||
/// Head radius in meters (average human).
|
||||
const HEAD_RADIUS: f32 = 0.0875;
|
||||
/// Speed of sound in m/s.
|
||||
const SPEED_OF_SOUND: f32 = 343.0;
|
||||
|
||||
/// HRTF filter result for a sound at a given azimuth angle.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct HrtfResult {
|
||||
pub left_delay_samples: f32, // ITD: delay for left ear in samples
|
||||
pub right_delay_samples: f32, // ITD: delay for right ear in samples
|
||||
pub left_gain: f32, // ILD: gain for left ear (0.0-1.0)
|
||||
pub right_gain: f32, // ILD: gain for right ear (0.0-1.0)
|
||||
}
|
||||
|
||||
/// Calculate HRTF parameters from azimuth angle.
|
||||
/// azimuth: angle in radians, 0 = front, PI/2 = right, -PI/2 = left, PI = behind.
|
||||
pub fn calculate_hrtf(azimuth: f32, sample_rate: u32) -> HrtfResult {
|
||||
// ITD: Woodworth formula
|
||||
// time_diff = (HEAD_RADIUS / SPEED_OF_SOUND) * (azimuth + sin(azimuth))
|
||||
let itd = (HEAD_RADIUS / SPEED_OF_SOUND) * (azimuth.abs() + azimuth.abs().sin());
|
||||
let delay_samples = itd * sample_rate as f32;
|
||||
|
||||
// ILD: simplified frequency-independent model
|
||||
// Sound is louder on the side facing the source
|
||||
let shadow = 0.5 * (1.0 + azimuth.cos()); // 1.0 at front, 0.5 at side, 0.0 at back
|
||||
|
||||
let (left_delay, right_delay, left_gain, right_gain);
|
||||
if azimuth >= 0.0 {
|
||||
// Sound from right side
|
||||
left_delay = delay_samples;
|
||||
right_delay = 0.0;
|
||||
left_gain = (0.3 + 0.7 * shadow).min(1.0); // shadowed side
|
||||
right_gain = 1.0;
|
||||
} else {
|
||||
// Sound from left side
|
||||
left_delay = 0.0;
|
||||
right_delay = delay_samples;
|
||||
left_gain = 1.0;
|
||||
right_gain = (0.3 + 0.7 * shadow).min(1.0);
|
||||
}
|
||||
|
||||
HrtfResult {
|
||||
left_delay_samples: left_delay,
|
||||
right_delay_samples: right_delay,
|
||||
left_gain,
|
||||
right_gain,
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate azimuth angle from listener position/forward to sound position.
|
||||
pub fn azimuth_from_positions(
|
||||
listener_pos: [f32; 3],
|
||||
listener_forward: [f32; 3],
|
||||
listener_right: [f32; 3],
|
||||
sound_pos: [f32; 3],
|
||||
) -> f32 {
|
||||
let dx = sound_pos[0] - listener_pos[0];
|
||||
let dy = sound_pos[1] - listener_pos[1];
|
||||
let dz = sound_pos[2] - listener_pos[2];
|
||||
let len = (dx * dx + dy * dy + dz * dz).sqrt();
|
||||
if len < 1e-6 {
|
||||
return 0.0;
|
||||
}
|
||||
let dir = [dx / len, dy / len, dz / len];
|
||||
|
||||
// Dot with right vector gives sin(azimuth)
|
||||
let right_dot =
|
||||
dir[0] * listener_right[0] + dir[1] * listener_right[1] + dir[2] * listener_right[2];
|
||||
// Dot with forward vector gives cos(azimuth)
|
||||
let fwd_dot = dir[0] * listener_forward[0]
|
||||
+ dir[1] * listener_forward[1]
|
||||
+ dir[2] * listener_forward[2];
|
||||
|
||||
right_dot.atan2(fwd_dot)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_hrtf_front() {
|
||||
let r = calculate_hrtf(0.0, 44100);
|
||||
assert!((r.left_delay_samples - 0.0).abs() < 1.0);
|
||||
assert!((r.right_delay_samples - 0.0).abs() < 1.0);
|
||||
assert!((r.left_gain - r.right_gain).abs() < 0.01); // symmetric
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hrtf_right() {
|
||||
let r = calculate_hrtf(PI / 2.0, 44100);
|
||||
assert!(r.left_delay_samples > r.right_delay_samples);
|
||||
assert!(r.left_gain < r.right_gain);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hrtf_left() {
|
||||
let r = calculate_hrtf(-PI / 2.0, 44100);
|
||||
assert!(r.right_delay_samples > r.left_delay_samples);
|
||||
assert!(r.right_gain < r.left_gain);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_azimuth_front() {
|
||||
let az = azimuth_from_positions(
|
||||
[0.0; 3],
|
||||
[0.0, 0.0, -1.0],
|
||||
[1.0, 0.0, 0.0],
|
||||
[0.0, 0.0, -5.0],
|
||||
);
|
||||
assert!(az.abs() < 0.1); // roughly front
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_azimuth_right() {
|
||||
let az = azimuth_from_positions(
|
||||
[0.0; 3],
|
||||
[0.0, 0.0, -1.0],
|
||||
[1.0, 0.0, 0.0],
|
||||
[5.0, 0.0, 0.0],
|
||||
);
|
||||
assert!((az - PI / 2.0).abs() < 0.1);
|
||||
}
|
||||
}
|
||||
@@ -5,11 +5,18 @@ pub mod mixing;
|
||||
pub mod wasapi;
|
||||
pub mod audio_system;
|
||||
pub mod spatial;
|
||||
pub mod hrtf;
|
||||
pub mod mix_group;
|
||||
pub mod audio_source;
|
||||
pub mod reverb;
|
||||
pub mod occlusion;
|
||||
|
||||
pub use audio_clip::AudioClip;
|
||||
pub use audio_source::AudioSource;
|
||||
pub use wav::{parse_wav, generate_wav_bytes};
|
||||
pub use mixing::{PlayingSound, mix_sounds};
|
||||
pub use audio_system::AudioSystem;
|
||||
pub use spatial::{Listener, SpatialParams, distance_attenuation, stereo_pan, compute_spatial_gains};
|
||||
pub use mix_group::{MixGroup, MixerState};
|
||||
pub use reverb::{Reverb, Echo, DelayLine};
|
||||
pub use occlusion::{OcclusionResult, LowPassFilter, calculate_occlusion};
|
||||
|
||||
96
crates/voltex_audio/src/occlusion.rs
Normal file
96
crates/voltex_audio/src/occlusion.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
/// Audio occlusion parameters.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct OcclusionResult {
|
||||
pub volume_multiplier: f32, // 0.0-1.0, reduced by occlusion
|
||||
pub lowpass_cutoff: f32, // Hz, lower = more muffled
|
||||
}
|
||||
|
||||
/// Simple ray-based occlusion check.
|
||||
/// `occlusion_factor`: 0.0 = no occlusion (direct line of sight), 1.0 = fully occluded.
|
||||
pub fn calculate_occlusion(occlusion_factor: f32) -> OcclusionResult {
|
||||
let factor = occlusion_factor.clamp(0.0, 1.0);
|
||||
OcclusionResult {
|
||||
volume_multiplier: 1.0 - factor * 0.7, // Max 70% volume reduction
|
||||
lowpass_cutoff: 20000.0 * (1.0 - factor * 0.8), // 20kHz → 4kHz when fully occluded
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple low-pass filter (one-pole IIR).
|
||||
pub struct LowPassFilter {
|
||||
prev_output: f32,
|
||||
alpha: f32,
|
||||
}
|
||||
|
||||
impl LowPassFilter {
|
||||
pub fn new(cutoff_hz: f32, sample_rate: u32) -> Self {
|
||||
let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_hz);
|
||||
let dt = 1.0 / sample_rate as f32;
|
||||
let alpha = dt / (rc + dt);
|
||||
LowPassFilter { prev_output: 0.0, alpha }
|
||||
}
|
||||
|
||||
pub fn set_cutoff(&mut self, cutoff_hz: f32, sample_rate: u32) {
|
||||
let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_hz);
|
||||
let dt = 1.0 / sample_rate as f32;
|
||||
self.alpha = dt / (rc + dt);
|
||||
}
|
||||
|
||||
pub fn process(&mut self, input: f32) -> f32 {
|
||||
self.prev_output = self.prev_output + self.alpha * (input - self.prev_output);
|
||||
self.prev_output
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
self.prev_output = 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_no_occlusion() {
|
||||
let r = calculate_occlusion(0.0);
|
||||
assert!((r.volume_multiplier - 1.0).abs() < 1e-6);
|
||||
assert!((r.lowpass_cutoff - 20000.0).abs() < 1.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_full_occlusion() {
|
||||
let r = calculate_occlusion(1.0);
|
||||
assert!((r.volume_multiplier - 0.3).abs() < 1e-6);
|
||||
assert!((r.lowpass_cutoff - 4000.0).abs() < 1.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_partial_occlusion() {
|
||||
let r = calculate_occlusion(0.5);
|
||||
assert!(r.volume_multiplier > 0.3 && r.volume_multiplier < 1.0);
|
||||
assert!(r.lowpass_cutoff > 4000.0 && r.lowpass_cutoff < 20000.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lowpass_attenuates_high_freq() {
|
||||
let mut lpf = LowPassFilter::new(100.0, 44100); // very low cutoff
|
||||
// High frequency signal (alternating +1/-1) should be attenuated
|
||||
let mut max_output = 0.0_f32;
|
||||
for i in 0..100 {
|
||||
let input = if i % 2 == 0 { 1.0 } else { -1.0 };
|
||||
let output = lpf.process(input);
|
||||
max_output = max_output.max(output.abs());
|
||||
}
|
||||
assert!(max_output < 0.5, "high freq should be attenuated, got {}", max_output);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lowpass_passes_dc() {
|
||||
let mut lpf = LowPassFilter::new(5000.0, 44100);
|
||||
// DC signal (constant 1.0) should pass through
|
||||
for _ in 0..1000 {
|
||||
lpf.process(1.0);
|
||||
}
|
||||
let output = lpf.process(1.0);
|
||||
assert!((output - 1.0).abs() < 0.01, "DC should pass, got {}", output);
|
||||
}
|
||||
}
|
||||
189
crates/voltex_audio/src/reverb.rs
Normal file
189
crates/voltex_audio/src/reverb.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
/// Simple delay line for echo effect.
|
||||
pub struct DelayLine {
|
||||
pub(crate) buffer: Vec<f32>,
|
||||
pub(crate) write_pos: usize,
|
||||
delay_samples: usize,
|
||||
}
|
||||
|
||||
impl DelayLine {
|
||||
pub fn new(delay_samples: usize) -> Self {
|
||||
DelayLine { buffer: vec![0.0; delay_samples.max(1)], write_pos: 0, delay_samples }
|
||||
}
|
||||
|
||||
pub fn process(&mut self, input: f32, feedback: f32) -> f32 {
|
||||
let read_pos = (self.write_pos + self.buffer.len() - self.delay_samples) % self.buffer.len();
|
||||
let delayed = self.buffer[read_pos];
|
||||
self.buffer[self.write_pos] = input + delayed * feedback;
|
||||
self.write_pos = (self.write_pos + 1) % self.buffer.len();
|
||||
delayed
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple Schroeder reverb using 4 parallel comb filters + 2 allpass filters.
|
||||
pub struct Reverb {
|
||||
comb_filters: Vec<CombFilter>,
|
||||
allpass_filters: Vec<AllpassFilter>,
|
||||
pub wet: f32, // 0.0-1.0
|
||||
pub dry: f32, // 0.0-1.0
|
||||
}
|
||||
|
||||
struct CombFilter {
|
||||
delay: DelayLine,
|
||||
feedback: f32,
|
||||
}
|
||||
|
||||
impl CombFilter {
|
||||
fn new(delay_samples: usize, feedback: f32) -> Self {
|
||||
CombFilter { delay: DelayLine::new(delay_samples), feedback }
|
||||
}
|
||||
fn process(&mut self, input: f32) -> f32 {
|
||||
self.delay.process(input, self.feedback)
|
||||
}
|
||||
}
|
||||
|
||||
struct AllpassFilter {
|
||||
delay: DelayLine,
|
||||
gain: f32,
|
||||
}
|
||||
|
||||
impl AllpassFilter {
|
||||
fn new(delay_samples: usize, gain: f32) -> Self {
|
||||
AllpassFilter { delay: DelayLine::new(delay_samples), gain }
|
||||
}
|
||||
fn process(&mut self, input: f32) -> f32 {
|
||||
let delayed = self.delay.process(input, 0.0);
|
||||
let _output = -self.gain * input + delayed + self.gain * delayed;
|
||||
// Actually: allpass: output = -g*input + delayed, buffer = input + g*delayed
|
||||
// Simplified:
|
||||
let buf_val = self.delay.buffer[(self.delay.write_pos + self.delay.buffer.len() - 1) % self.delay.buffer.len()];
|
||||
-self.gain * input + buf_val
|
||||
}
|
||||
}
|
||||
|
||||
impl Reverb {
|
||||
pub fn new(sample_rate: u32) -> Self {
|
||||
// Schroeder reverb with prime-number delay lengths
|
||||
let sr = sample_rate as f32;
|
||||
Reverb {
|
||||
comb_filters: vec![
|
||||
CombFilter::new((0.0297 * sr) as usize, 0.805),
|
||||
CombFilter::new((0.0371 * sr) as usize, 0.827),
|
||||
CombFilter::new((0.0411 * sr) as usize, 0.783),
|
||||
CombFilter::new((0.0437 * sr) as usize, 0.764),
|
||||
],
|
||||
allpass_filters: vec![
|
||||
AllpassFilter::new((0.005 * sr) as usize, 0.7),
|
||||
AllpassFilter::new((0.0017 * sr) as usize, 0.7),
|
||||
],
|
||||
wet: 0.3,
|
||||
dry: 0.7,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process(&mut self, input: f32) -> f32 {
|
||||
// Sum comb filter outputs
|
||||
let mut comb_sum = 0.0;
|
||||
for comb in &mut self.comb_filters {
|
||||
comb_sum += comb.process(input);
|
||||
}
|
||||
comb_sum /= self.comb_filters.len() as f32;
|
||||
|
||||
// Chain through allpass filters
|
||||
let mut output = comb_sum;
|
||||
for ap in &mut self.allpass_filters {
|
||||
output = ap.process(output);
|
||||
}
|
||||
|
||||
input * self.dry + output * self.wet
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple echo (single delay + feedback).
|
||||
pub struct Echo {
|
||||
delay: DelayLine,
|
||||
pub feedback: f32,
|
||||
pub wet: f32,
|
||||
pub dry: f32,
|
||||
}
|
||||
|
||||
impl Echo {
|
||||
pub fn new(delay_ms: f32, sample_rate: u32) -> Self {
|
||||
let samples = (delay_ms * sample_rate as f32 / 1000.0) as usize;
|
||||
Echo { delay: DelayLine::new(samples), feedback: 0.5, wet: 0.3, dry: 0.7 }
|
||||
}
|
||||
|
||||
pub fn process(&mut self, input: f32) -> f32 {
|
||||
let delayed = self.delay.process(input, self.feedback);
|
||||
input * self.dry + delayed * self.wet
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_delay_line_basic() {
|
||||
let mut dl = DelayLine::new(2);
|
||||
assert!((dl.process(1.0, 0.0) - 0.0).abs() < 1e-6); // no output yet
|
||||
assert!((dl.process(0.0, 0.0) - 0.0).abs() < 1e-6);
|
||||
assert!((dl.process(0.0, 0.0) - 1.0).abs() < 1e-6); // input appears after 2-sample delay
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_delay_line_feedback() {
|
||||
let mut dl = DelayLine::new(2);
|
||||
dl.process(1.0, 0.5);
|
||||
dl.process(0.0, 0.5);
|
||||
let out = dl.process(0.0, 0.5); // delayed 1.0
|
||||
assert!((out - 1.0).abs() < 1e-6);
|
||||
dl.process(0.0, 0.5);
|
||||
let out2 = dl.process(0.0, 0.5); // feedback: 0.5
|
||||
assert!((out2 - 0.5).abs() < 1e-6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reverb_preserves_signal() {
|
||||
let mut rev = Reverb::new(44100);
|
||||
// Process silence -> should be silence
|
||||
let out = rev.process(0.0);
|
||||
assert!((out - 0.0).abs() < 1e-6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reverb_produces_output() {
|
||||
let mut rev = Reverb::new(44100);
|
||||
// Send an impulse, collect some output
|
||||
let mut energy = 0.0;
|
||||
rev.process(1.0);
|
||||
for _ in 0..4410 {
|
||||
let s = rev.process(0.0);
|
||||
energy += s.abs();
|
||||
}
|
||||
assert!(energy > 0.01, "reverb should produce decaying output after impulse");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_echo_basic() {
|
||||
let mut echo = Echo::new(100.0, 44100); // 100ms delay
|
||||
let delay_samples = (100.0 * 44100.0 / 1000.0) as usize;
|
||||
// Send impulse
|
||||
echo.process(1.0);
|
||||
// Process until delay
|
||||
for _ in 1..delay_samples {
|
||||
echo.process(0.0);
|
||||
}
|
||||
let out = echo.process(0.0);
|
||||
// Should have echo of input (wet * delayed)
|
||||
assert!(out.abs() > 0.01, "echo should produce delayed output");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_echo_wet_dry() {
|
||||
let mut echo = Echo::new(10.0, 44100);
|
||||
echo.wet = 0.0;
|
||||
echo.dry = 1.0;
|
||||
let out = echo.process(0.5);
|
||||
assert!((out - 0.5).abs() < 1e-6); // dry only
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,8 @@ pub struct SparseSet<T> {
|
||||
sparse: Vec<Option<usize>>,
|
||||
dense_entities: Vec<Entity>,
|
||||
dense_data: Vec<T>,
|
||||
ticks: Vec<u64>,
|
||||
current_tick: u64,
|
||||
}
|
||||
|
||||
impl<T> SparseSet<T> {
|
||||
@@ -13,6 +15,8 @@ impl<T> SparseSet<T> {
|
||||
sparse: Vec::new(),
|
||||
dense_entities: Vec::new(),
|
||||
dense_data: Vec::new(),
|
||||
ticks: Vec::new(),
|
||||
current_tick: 1,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,11 +31,13 @@ impl<T> SparseSet<T> {
|
||||
// Overwrite existing
|
||||
self.dense_data[dense_idx] = value;
|
||||
self.dense_entities[dense_idx] = entity;
|
||||
self.ticks[dense_idx] = self.current_tick;
|
||||
} else {
|
||||
let dense_idx = self.dense_data.len();
|
||||
self.sparse[id] = Some(dense_idx);
|
||||
self.dense_entities.push(entity);
|
||||
self.dense_data.push(value);
|
||||
self.ticks.push(self.current_tick);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,12 +55,14 @@ impl<T> SparseSet<T> {
|
||||
|
||||
if dense_idx == last_idx {
|
||||
self.dense_entities.pop();
|
||||
self.ticks.pop();
|
||||
Some(self.dense_data.pop().unwrap())
|
||||
} else {
|
||||
// Swap with last
|
||||
let swapped_entity = self.dense_entities[last_idx];
|
||||
self.sparse[swapped_entity.id as usize] = Some(dense_idx);
|
||||
self.dense_entities.swap_remove(dense_idx);
|
||||
self.ticks.swap_remove(dense_idx);
|
||||
Some(self.dense_data.swap_remove(dense_idx))
|
||||
}
|
||||
}
|
||||
@@ -74,6 +82,7 @@ impl<T> SparseSet<T> {
|
||||
if self.dense_entities[dense_idx] != entity {
|
||||
return None;
|
||||
}
|
||||
self.ticks[dense_idx] = self.current_tick;
|
||||
Some(&mut self.dense_data[dense_idx])
|
||||
}
|
||||
|
||||
@@ -114,6 +123,29 @@ impl<T> SparseSet<T> {
|
||||
pub fn data_mut(&mut self) -> &mut [T] {
|
||||
&mut self.dense_data
|
||||
}
|
||||
|
||||
/// Check if an entity's component was changed this tick.
|
||||
pub fn is_changed(&self, entity: Entity) -> bool {
|
||||
if let Some(&index) = self.sparse.get(entity.id as usize).and_then(|o| o.as_ref()) {
|
||||
self.ticks[index] == self.current_tick
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance the tick counter (call at end of frame).
|
||||
pub fn increment_tick(&mut self) {
|
||||
self.current_tick += 1;
|
||||
}
|
||||
|
||||
/// Return entities changed this tick with their data.
|
||||
pub fn iter_changed(&self) -> impl Iterator<Item = (Entity, &T)> + '_ {
|
||||
self.dense_entities.iter()
|
||||
.zip(self.dense_data.iter())
|
||||
.zip(self.ticks.iter())
|
||||
.filter(move |((_, _), &tick)| tick == self.current_tick)
|
||||
.map(|((entity, data), _)| (*entity, data))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for SparseSet<T> {
|
||||
@@ -127,6 +159,7 @@ pub trait ComponentStorage: Any {
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
fn remove_entity(&mut self, entity: Entity);
|
||||
fn storage_len(&self) -> usize;
|
||||
fn increment_tick(&mut self);
|
||||
}
|
||||
|
||||
impl<T: 'static> ComponentStorage for SparseSet<T> {
|
||||
@@ -142,6 +175,9 @@ impl<T: 'static> ComponentStorage for SparseSet<T> {
|
||||
fn storage_len(&self) -> usize {
|
||||
self.dense_data.len()
|
||||
}
|
||||
fn increment_tick(&mut self) {
|
||||
self.current_tick += 1;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -235,6 +271,74 @@ mod tests {
|
||||
assert!(!set.contains(e));
|
||||
}
|
||||
|
||||
    // --- Change-tracking tests: insert/get_mut mark, get does not, ---
    // --- increment_tick clears, swap_remove keeps sibling ticks.    ---

    #[test]
    fn test_insert_is_changed() {
        // Insertion counts as a change in the current tick.
        let mut set = SparseSet::<u32>::new();
        let e = make_entity(0, 0);
        set.insert(e, 42);
        assert!(set.is_changed(e));
    }

    #[test]
    fn test_get_mut_marks_changed() {
        // Mutable access re-stamps the entity after a tick rollover.
        let mut set = SparseSet::<u32>::new();
        let e = make_entity(0, 0);
        set.insert(e, 42);
        set.increment_tick();
        assert!(!set.is_changed(e));
        let _ = set.get_mut(e);
        assert!(set.is_changed(e));
    }

    #[test]
    fn test_get_not_changed() {
        // Shared (read-only) access must not mark the entity changed.
        let mut set = SparseSet::<u32>::new();
        let e = make_entity(0, 0);
        set.insert(e, 42);
        set.increment_tick();
        let _ = set.get(e);
        assert!(!set.is_changed(e));
    }

    #[test]
    fn test_clear_resets_changed() {
        // Advancing the tick clears the "changed this tick" status.
        let mut set = SparseSet::<u32>::new();
        let e = make_entity(0, 0);
        set.insert(e, 42);
        assert!(set.is_changed(e));
        set.increment_tick();
        assert!(!set.is_changed(e));
    }

    #[test]
    fn test_iter_changed() {
        // Only the entity touched after the tick shows up in iter_changed.
        let mut set = SparseSet::<u32>::new();
        let e1 = make_entity(0, 0);
        let e2 = make_entity(1, 0);
        let e3 = make_entity(2, 0);
        set.insert(e1, 10);
        set.insert(e2, 20);
        set.insert(e3, 30);
        set.increment_tick();
        let _ = set.get_mut(e2);
        let changed: Vec<_> = set.iter_changed().collect();
        assert_eq!(changed.len(), 1);
        assert_eq!(changed[0].0.id, 1);
    }

    #[test]
    fn test_remove_preserves_ticks() {
        // swap_remove of e1 must not disturb e2's tick stamp.
        let mut set = SparseSet::<u32>::new();
        let e1 = make_entity(0, 0);
        let e2 = make_entity(1, 0);
        set.insert(e1, 10);
        set.insert(e2, 20);
        set.increment_tick();
        let _ = set.get_mut(e2);
        set.remove(e1);
        assert!(set.is_changed(e2));
    }
|
||||
|
||||
#[test]
|
||||
fn test_swap_remove_correctness() {
|
||||
let mut set: SparseSet<i32> = SparseSet::new();
|
||||
|
||||
@@ -228,6 +228,23 @@ impl World {
|
||||
result
|
||||
}
|
||||
|
||||
/// Query entities whose component T was changed this tick.
|
||||
pub fn query_changed<T: 'static>(&self) -> Vec<(Entity, &T)> {
|
||||
if let Some(storage) = self.storages.get(&TypeId::of::<T>()) {
|
||||
let set = storage.as_any().downcast_ref::<SparseSet<T>>().unwrap();
|
||||
set.iter_changed().collect()
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance tick on all component storages (call at end of frame).
|
||||
pub fn clear_changed(&mut self) {
|
||||
for storage in self.storages.values_mut() {
|
||||
storage.increment_tick();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_component<T: 'static>(&self, entity: Entity) -> bool {
|
||||
self.storage::<T>().map_or(false, |s| s.contains(entity))
|
||||
}
|
||||
@@ -526,6 +543,37 @@ mod tests {
|
||||
assert_eq!(results[0].0, e1);
|
||||
}
|
||||
|
||||
    #[test]
    fn test_query_changed() {
        // Only the entity mutated after clear_changed() is reported.
        let mut world = World::new();
        let e1 = world.spawn();
        let e2 = world.spawn();
        world.add(e1, 10u32);
        world.add(e2, 20u32);
        world.clear_changed();
        if let Some(v) = world.get_mut::<u32>(e1) {
            *v = 100;
        }
        let changed = world.query_changed::<u32>();
        assert_eq!(changed.len(), 1);
        assert_eq!(*changed[0].1, 100);
    }
|
||||
|
||||
    #[test]
    fn test_clear_changed_all_storages() {
        // clear_changed must advance the tick on every storage, not just one.
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, 42u32);
        world.add(e, 3.14f32);
        let changed_u32 = world.query_changed::<u32>();
        assert_eq!(changed_u32.len(), 1);
        world.clear_changed();
        let changed_u32 = world.query_changed::<u32>();
        assert_eq!(changed_u32.len(), 0);
        let changed_f32 = world.query_changed::<f32>();
        assert_eq!(changed_f32.len(), 0);
    }
|
||||
|
||||
#[test]
|
||||
fn test_entity_count() {
|
||||
let mut world = World::new();
|
||||
|
||||
@@ -5,4 +5,7 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bytemuck = { workspace = true }
|
||||
voltex_math = { workspace = true }
|
||||
voltex_ecs = { workspace = true }
|
||||
voltex_renderer = { workspace = true }
|
||||
wgpu = { workspace = true }
|
||||
|
||||
297
crates/voltex_editor/src/asset_browser.rs
Normal file
297
crates/voltex_editor/src/asset_browser.rs
Normal file
@@ -0,0 +1,297 @@
|
||||
use std::path::PathBuf;
|
||||
use crate::ui_context::UiContext;
|
||||
use crate::dock::Rect;
|
||||
use crate::layout::LayoutState;
|
||||
|
||||
const COLOR_SELECTED: [u8; 4] = [0x44, 0x66, 0x88, 0xFF];
|
||||
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
|
||||
const PADDING: f32 = 4.0;
|
||||
|
||||
/// A single directory listing entry.
pub struct DirEntry {
    /// File or directory name (no path components).
    pub name: String,
    /// True when the entry is a directory.
    pub is_dir: bool,
    /// Size in bytes (0 when metadata was unreadable).
    pub size: u64,
}

/// File-system browser rooted at a fixed directory.
///
/// Navigation is sandboxed: `current` is always `root` or one of its
/// descendants, with `..`/symlink escapes rejected after canonicalization.
pub struct AssetBrowser {
    /// Canonicalized top-most directory the browser may show.
    pub root: PathBuf,
    /// Currently displayed directory (always inside `root`).
    pub current: PathBuf,
    /// Sorted listing of `current` (directories first, then by name).
    pub entries: Vec<DirEntry>,
    /// Name of the selected (non-directory) entry, if any.
    pub selected_file: Option<String>,
}

/// Format a byte count as a human-readable string (B / KB / MB / GB).
pub fn format_size(bytes: u64) -> String {
    const KB: u64 = 1024;
    const MB: u64 = KB * 1024;
    const GB: u64 = MB * 1024;
    if bytes < KB {
        format!("{} B", bytes)
    } else if bytes < MB {
        format!("{:.1} KB", bytes as f64 / KB as f64)
    } else if bytes < GB {
        format!("{:.1} MB", bytes as f64 / MB as f64)
    } else {
        // Previously anything >= 1 GB rendered as a huge MB figure.
        format!("{:.1} GB", bytes as f64 / GB as f64)
    }
}

impl AssetBrowser {
    /// Create a browser rooted at `root` (canonicalized when possible)
    /// and scan the root directory immediately.
    pub fn new(root: PathBuf) -> Self {
        let root = std::fs::canonicalize(&root).unwrap_or(root);
        let current = root.clone();
        let mut browser = AssetBrowser {
            root,
            current,
            entries: Vec::new(),
            selected_file: None,
        };
        browser.refresh();
        browser
    }

    /// Re-scan `current` and clear the selection.
    ///
    /// Unreadable entries are skipped; directories sort before files,
    /// each group alphabetically.
    pub fn refresh(&mut self) {
        self.entries.clear();
        self.selected_file = None;
        if let Ok(read_dir) = std::fs::read_dir(&self.current) {
            for entry in read_dir.flatten() {
                let meta = entry.metadata().ok();
                let is_dir = meta.as_ref().map_or(false, |m| m.is_dir());
                let size = meta.as_ref().map_or(0, |m| m.len());
                let name = entry.file_name().to_string_lossy().to_string();
                self.entries.push(DirEntry { name, is_dir, size });
            }
        }
        self.entries.sort_by(|a, b| {
            b.is_dir.cmp(&a.is_dir).then(a.name.cmp(&b.name))
        });
    }

    /// Enter `dir_name` relative to the current directory, staying inside
    /// the sandbox root. Non-existent or non-directory targets are ignored.
    ///
    /// BUG FIX: the previous check compared the *lexical* path —
    /// `current.join("..")` still `starts_with(root)` component-wise, so
    /// `navigate_to("..")` at the root escaped the sandbox (and symlinks
    /// pointing outside were accepted). Canonicalizing first resolves
    /// `..` and symlinks before the containment check.
    pub fn navigate_to(&mut self, dir_name: &str) {
        let target = self.current.join(dir_name);
        if let Ok(canonical) = std::fs::canonicalize(&target) {
            if canonical.starts_with(&self.root) && canonical.is_dir() {
                self.current = canonical;
                self.refresh();
            }
        }
    }

    /// Move to the parent directory, but never above `root`.
    pub fn go_up(&mut self) {
        if self.current != self.root {
            if let Some(parent) = self.current.parent() {
                let parent = parent.to_path_buf();
                if parent.starts_with(&self.root) || parent == self.root {
                    self.current = parent;
                    self.refresh();
                }
            }
        }
    }

    /// Path of `current` relative to `root` (empty string at the root).
    pub fn relative_path(&self) -> String {
        self.current
            .strip_prefix(&self.root)
            .unwrap_or(&self.current)
            .to_string_lossy()
            .to_string()
    }
}
|
||||
|
||||
pub fn asset_browser_panel(
|
||||
ui: &mut UiContext,
|
||||
browser: &mut AssetBrowser,
|
||||
rect: &Rect,
|
||||
) {
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
|
||||
let path_text = format!("Path: /{}", browser.relative_path());
|
||||
ui.text(&path_text);
|
||||
|
||||
// Go up button
|
||||
if browser.current != browser.root {
|
||||
if ui.button("[..]") {
|
||||
browser.go_up();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let gw = ui.font.glyph_width as f32;
|
||||
let gh = ui.font.glyph_height as f32;
|
||||
let line_h = gh + PADDING;
|
||||
|
||||
// Collect click action to avoid borrow conflict
|
||||
let mut clicked_dir: Option<String> = None;
|
||||
let entries_snapshot: Vec<(String, bool)> = browser.entries.iter()
|
||||
.map(|e| (e.name.clone(), e.is_dir))
|
||||
.collect();
|
||||
|
||||
for (name, is_dir) in &entries_snapshot {
|
||||
let y = ui.layout.cursor_y;
|
||||
let x = rect.x + PADDING;
|
||||
|
||||
// Highlight selected file
|
||||
if !is_dir {
|
||||
if browser.selected_file.as_deref() == Some(name.as_str()) {
|
||||
ui.draw_list.add_rect(rect.x, y, rect.w, line_h, COLOR_SELECTED);
|
||||
}
|
||||
}
|
||||
|
||||
// Click
|
||||
if ui.mouse_clicked && ui.mouse_in_rect(rect.x, y, rect.w, line_h) {
|
||||
if *is_dir {
|
||||
clicked_dir = Some(name.clone());
|
||||
} else {
|
||||
browser.selected_file = Some(name.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Label
|
||||
let label = if *is_dir {
|
||||
format!("[D] {}", name)
|
||||
} else {
|
||||
format!(" {}", name)
|
||||
};
|
||||
|
||||
let text_y = y + (line_h - gh) * 0.5;
|
||||
let mut cx = x;
|
||||
for ch in label.chars() {
|
||||
let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
|
||||
ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
|
||||
cx += gw;
|
||||
}
|
||||
|
||||
ui.layout.cursor_y += line_h;
|
||||
}
|
||||
|
||||
if let Some(dir) = clicked_dir {
|
||||
browser.navigate_to(&dir);
|
||||
}
|
||||
|
||||
// File info
|
||||
if let Some(ref file_name) = browser.selected_file.clone() {
|
||||
ui.text("-- File Info --");
|
||||
ui.text(&format!("Name: {}", file_name));
|
||||
if let Some(entry) = browser.entries.iter().find(|e| e.name == *file_name) {
|
||||
ui.text(&format!("Size: {}", format_size(entry.size)));
|
||||
if let Some(dot_pos) = file_name.rfind('.') {
|
||||
let ext = &file_name[dot_pos..];
|
||||
ui.text(&format!("Type: {}", ext));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Tests exercise the browser against real temp directories; each test
    // uses its own uniquely named dir and removes it afterwards.
    use super::*;
    use std::fs;

    // Create (or recreate) an empty per-test temp directory.
    fn make_temp_dir(name: &str) -> PathBuf {
        let dir = std::env::temp_dir().join(format!("voltex_ab_test_{}", name));
        let _ = fs::remove_dir_all(&dir);
        fs::create_dir_all(&dir).unwrap();
        dir
    }

    #[test]
    fn test_new_scans_entries() {
        let dir = make_temp_dir("scan");
        fs::write(dir.join("file1.txt"), "hello").unwrap();
        fs::write(dir.join("file2.png"), "data").unwrap();
        fs::create_dir_all(dir.join("subdir")).unwrap();

        let browser = AssetBrowser::new(dir.clone());
        assert_eq!(browser.entries.len(), 3);

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_entries_sorted_dirs_first() {
        let dir = make_temp_dir("sort");
        fs::write(dir.join("zebra.txt"), "z").unwrap();
        fs::write(dir.join("alpha.txt"), "a").unwrap();
        fs::create_dir_all(dir.join("middle_dir")).unwrap();

        let browser = AssetBrowser::new(dir.clone());
        // Dir should come first
        assert!(browser.entries[0].is_dir);
        assert_eq!(browser.entries[0].name, "middle_dir");
        // Then files alphabetically
        assert_eq!(browser.entries[1].name, "alpha.txt");
        assert_eq!(browser.entries[2].name, "zebra.txt");

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_navigate_to() {
        let dir = make_temp_dir("nav");
        fs::create_dir_all(dir.join("sub")).unwrap();
        fs::write(dir.join("sub").join("inner.txt"), "x").unwrap();

        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to("sub");
        assert!(browser.current.ends_with("sub"));
        assert_eq!(browser.entries.len(), 1);
        assert_eq!(browser.entries[0].name, "inner.txt");

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_go_up() {
        let dir = make_temp_dir("goup");
        fs::create_dir_all(dir.join("child")).unwrap();

        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to("child");
        assert!(browser.current.ends_with("child"));

        browser.go_up();
        assert_eq!(browser.current, std::fs::canonicalize(&dir).unwrap());

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_go_up_at_root() {
        let dir = make_temp_dir("goup_root");
        let browser_root = std::fs::canonicalize(&dir).unwrap();
        let mut browser = AssetBrowser::new(dir.clone());
        browser.go_up(); // should be no-op
        assert_eq!(browser.current, browser_root);

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_root_guard() {
        // ".." must never escape the sandbox root.
        let dir = make_temp_dir("guard");
        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to(".."); // should be rejected
        assert_eq!(browser.current, std::fs::canonicalize(&dir).unwrap());

        let _ = fs::remove_dir_all(&dir);
    }

    #[test]
    fn test_format_size() {
        assert_eq!(format_size(0), "0 B");
        assert_eq!(format_size(500), "500 B");
        assert_eq!(format_size(1024), "1.0 KB");
        assert_eq!(format_size(1536), "1.5 KB");
        assert_eq!(format_size(1048576), "1.0 MB");
    }

    #[test]
    fn test_panel_draws_commands() {
        // Smoke test: one frame of the panel emits at least one draw command.
        let dir = make_temp_dir("panel");
        fs::write(dir.join("test.txt"), "hello").unwrap();

        let mut browser = AssetBrowser::new(dir.clone());
        let mut ui = UiContext::new(800.0, 600.0);
        let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };

        ui.begin_frame(0.0, 0.0, false);
        asset_browser_panel(&mut ui, &mut browser, &rect);
        ui.end_frame();

        assert!(ui.draw_list.commands.len() > 0);

        let _ = fs::remove_dir_all(&dir);
    }
}
|
||||
558
crates/voltex_editor/src/dock.rs
Normal file
558
crates/voltex_editor/src/dock.rs
Normal file
@@ -0,0 +1,558 @@
|
||||
use crate::ui_context::UiContext;
|
||||
|
||||
/// Height in pixels of the tab strip at the top of every leaf.
const TAB_BAR_HEIGHT: f32 = 20.0;
/// Split ratios are clamped to [MIN_RATIO, MAX_RATIO] so neither child
/// can be collapsed away entirely.
const MIN_RATIO: f32 = 0.1;
const MAX_RATIO: f32 = 0.9;
/// Half-width of the grab zone around a split boundary.
const RESIZE_HANDLE_HALF: f32 = 3.0;
/// Fixed glyph advance used when hit-testing tab labels.
const GLYPH_W: f32 = 8.0;
/// Horizontal padding added to each tab's label width.
const TAB_PADDING: f32 = 8.0;

/// Axis-aligned rectangle in screen pixels.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
    pub x: f32,
    pub y: f32,
    pub w: f32,
    pub h: f32,
}

impl Rect {
    /// True when (px, py) lies inside the rectangle. Edges are half-open:
    /// left/top inclusive, right/bottom exclusive.
    pub fn contains(&self, px: f32, py: f32) -> bool {
        let inside_x = (self.x..self.x + self.w).contains(&px);
        let inside_y = (self.y..self.y + self.h).contains(&py);
        inside_x && inside_y
    }
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub enum Axis {
|
||||
Horizontal,
|
||||
Vertical,
|
||||
}
|
||||
|
||||
pub enum DockNode {
|
||||
Leaf { tabs: Vec<u32>, active: usize },
|
||||
Split { axis: Axis, ratio: f32, children: [Box<DockNode>; 2] },
|
||||
}
|
||||
|
||||
impl DockNode {
|
||||
pub fn leaf(tabs: Vec<u32>) -> Self {
|
||||
DockNode::Leaf { tabs, active: 0 }
|
||||
}
|
||||
|
||||
pub fn split(axis: Axis, ratio: f32, a: DockNode, b: DockNode) -> Self {
|
||||
DockNode::Split {
|
||||
axis,
|
||||
ratio: ratio.clamp(MIN_RATIO, MAX_RATIO),
|
||||
children: [Box::new(a), Box::new(b)],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-frame layout result for one leaf: where its tab bar and content
/// area landed on screen.
pub struct LeafLayout {
    /// Stable index of this leaf in a depth-first tree traversal.
    pub leaf_index: usize,
    /// Panel ids shown as tabs in this leaf.
    pub tabs: Vec<u32>,
    /// Index into `tabs` of the visible tab (clamped during layout).
    pub active: usize,
    /// Screen rect of the tab strip.
    pub tab_bar_rect: Rect,
    /// Screen rect below the tab strip where the panel content draws.
    pub content_rect: Rect,
}

/// Cached geometry of one split node, used for resize hit-testing.
struct SplitLayout {
    /// Full rect this split subdivides.
    rect: Rect,
    axis: Axis,
    /// Screen coordinate (x or y, depending on `axis`) of the divider.
    boundary: f32,
    /// Child indices (0/1) from the root down to this split node.
    path: Vec<usize>,
}

/// State of an in-progress divider drag.
struct ResizeState {
    /// Tree path of the split being resized.
    path: Vec<usize>,
    axis: Axis,
    /// Start coordinate of the split's rect along `axis`.
    origin: f32,
    /// Extent of the split's rect along `axis`.
    size: f32,
}

/// What a mouse click resolved to during `update`.
enum UpdateAction {
    None,
    StartResize(ResizeState),
    SetActiveTab { leaf_index: usize, new_active: usize },
}

/// Dockable panel tree with cached per-frame layout and drag state.
pub struct DockTree {
    root: DockNode,
    /// Panel display names indexed by panel id.
    names: Vec<&'static str>,
    /// Leaf geometry from the most recent `layout` call.
    cached_leaves: Vec<LeafLayout>,
    /// Split geometry from the most recent `layout` call.
    cached_splits: Vec<SplitLayout>,
    /// Active divider drag, if any.
    resizing: Option<ResizeState>,
    /// Mouse-button state from the previous `update`, for edge detection.
    prev_mouse_down: bool,
}
|
||||
|
||||
/// Recursively compute leaf and split geometry for `node` within `rect`.
///
/// `path` is the child-index trail from the root (used later to address
/// splits when resizing); `leaf_counter` assigns stable depth-first leaf
/// indices. Results are appended to `leaves` and `splits`.
fn layout_recursive(
    node: &DockNode,
    rect: Rect,
    path: &mut Vec<usize>,
    leaf_counter: &mut usize,
    leaves: &mut Vec<LeafLayout>,
    splits: &mut Vec<SplitLayout>,
) {
    match node {
        DockNode::Leaf { tabs, active } => {
            assert!(!tabs.is_empty(), "DockNode::Leaf must have at least one tab");
            let idx = *leaf_counter;
            *leaf_counter += 1;
            leaves.push(LeafLayout {
                leaf_index: idx,
                tabs: tabs.clone(),
                // Clamp a stale `active` so it always indexes into `tabs`.
                active: (*active).min(tabs.len().saturating_sub(1)),
                tab_bar_rect: Rect { x: rect.x, y: rect.y, w: rect.w, h: TAB_BAR_HEIGHT },
                content_rect: Rect {
                    x: rect.x,
                    y: rect.y + TAB_BAR_HEIGHT,
                    w: rect.w,
                    // max(0.0) guards rects shorter than the tab bar itself.
                    h: (rect.h - TAB_BAR_HEIGHT).max(0.0),
                },
            });
        }
        DockNode::Split { axis, ratio, children } => {
            // Divide `rect` along the split axis; `boundary` is the screen
            // coordinate of the divider line between the two child rects.
            let (r1, r2, boundary) = match axis {
                Axis::Horizontal => {
                    let w1 = rect.w * ratio;
                    let b = rect.x + w1;
                    (
                        Rect { x: rect.x, y: rect.y, w: w1, h: rect.h },
                        Rect { x: b, y: rect.y, w: rect.w - w1, h: rect.h },
                        b,
                    )
                }
                Axis::Vertical => {
                    let h1 = rect.h * ratio;
                    let b = rect.y + h1;
                    (
                        Rect { x: rect.x, y: rect.y, w: rect.w, h: h1 },
                        Rect { x: rect.x, y: b, w: rect.w, h: rect.h - h1 },
                        b,
                    )
                }
            };
            splits.push(SplitLayout { rect, axis: *axis, boundary, path: path.clone() });
            // Recurse into both children, tracking the path taken.
            path.push(0);
            layout_recursive(&children[0], r1, path, leaf_counter, leaves, splits);
            path.pop();
            path.push(1);
            layout_recursive(&children[1], r2, path, leaf_counter, leaves, splits);
            path.pop();
        }
    }
}
||||
|
||||
impl DockTree {
    /// Build a tree from a node hierarchy and panel display names
    /// (indexed by panel id).
    pub fn new(root: DockNode, names: Vec<&'static str>) -> Self {
        DockTree {
            root,
            names,
            cached_leaves: Vec::new(),
            cached_splits: Vec::new(),
            resizing: None,
            prev_mouse_down: false,
        }
    }

    /// Process one frame of mouse input. Call after `layout` (hit-testing
    /// uses the cached geometry from the most recent layout pass).
    pub fn update(&mut self, mouse_x: f32, mouse_y: f32, mouse_down: bool) {
        // Rising-edge detection: a click is this frame's press.
        let just_clicked = mouse_down && !self.prev_mouse_down;
        self.prev_mouse_down = mouse_down;

        // Active resize in progress: while the button stays down, keep
        // re-deriving the ratio from the cursor; release ends the drag.
        if let Some(ref state) = self.resizing {
            if mouse_down {
                let new_ratio = match state.axis {
                    Axis::Horizontal => (mouse_x - state.origin) / state.size,
                    Axis::Vertical => (mouse_y - state.origin) / state.size,
                };
                let clamped = new_ratio.clamp(MIN_RATIO, MAX_RATIO);
                let path = state.path.clone();
                Self::set_ratio_at_path(&mut self.root, &path, clamped);
            } else {
                self.resizing = None;
            }
            // A drag consumes all input for the frame.
            return;
        }

        if !just_clicked { return; }

        let action = self.find_click_action(mouse_x, mouse_y);

        match action {
            UpdateAction::StartResize(state) => { self.resizing = Some(state); }
            UpdateAction::SetActiveTab { leaf_index, new_active } => {
                Self::set_active_nth(&mut self.root, leaf_index, new_active, &mut 0);
            }
            UpdateAction::None => {}
        }
    }

    /// Resolve a click position into an action using cached geometry.
    fn find_click_action(&self, mx: f32, my: f32) -> UpdateAction {
        // Check resize handles first (priority over tab clicks)
        for split in &self.cached_splits {
            // Hit zone: a band RESIZE_HANDLE_HALF wide on either side of
            // the divider, limited to the split's own rect.
            let hit = match split.axis {
                Axis::Horizontal => {
                    mx >= split.boundary - RESIZE_HANDLE_HALF
                        && mx <= split.boundary + RESIZE_HANDLE_HALF
                        && my >= split.rect.y
                        && my < split.rect.y + split.rect.h
                }
                Axis::Vertical => {
                    my >= split.boundary - RESIZE_HANDLE_HALF
                        && my <= split.boundary + RESIZE_HANDLE_HALF
                        && mx >= split.rect.x
                        && mx < split.rect.x + split.rect.w
                }
            };
            if hit {
                // Record the drag frame of reference so the ratio can be
                // recomputed from absolute cursor positions later.
                let (origin, size) = match split.axis {
                    Axis::Horizontal => (split.rect.x, split.rect.w),
                    Axis::Vertical => (split.rect.y, split.rect.h),
                };
                return UpdateAction::StartResize(ResizeState {
                    path: split.path.clone(),
                    axis: split.axis,
                    origin,
                    size,
                });
            }
        }

        // Check tab bar clicks
        for leaf in &self.cached_leaves {
            // Single-tab leaves have nothing to switch.
            if leaf.tabs.len() <= 1 { continue; }
            if !leaf.tab_bar_rect.contains(mx, my) { continue; }
            let mut tx = leaf.tab_bar_rect.x;
            for (i, &panel_id) in leaf.tabs.iter().enumerate() {
                // Tab width mirrors draw_chrome's label sizing.
                let name_len = self.names.get(panel_id as usize).map(|n| n.len()).unwrap_or(1);
                let tab_w = name_len as f32 * GLYPH_W + TAB_PADDING;
                if mx >= tx && mx < tx + tab_w {
                    return UpdateAction::SetActiveTab { leaf_index: leaf.leaf_index, new_active: i };
                }
                tx += tab_w;
            }
        }

        UpdateAction::None
    }

    /// Set the ratio of the split addressed by `path` (child indices from
    /// the root). Silently does nothing if the path no longer leads to a
    /// split node.
    fn set_ratio_at_path(node: &mut DockNode, path: &[usize], ratio: f32) {
        if path.is_empty() {
            if let DockNode::Split { ratio: r, .. } = node { *r = ratio; }
            return;
        }
        if let DockNode::Split { children, .. } = node {
            Self::set_ratio_at_path(&mut children[path[0]], &path[1..], ratio);
        }
    }

    /// Set the active tab of the `target`-th leaf in depth-first order.
    /// `count` threads the running leaf index through the recursion.
    fn set_active_nth(node: &mut DockNode, target: usize, new_active: usize, count: &mut usize) {
        match node {
            DockNode::Leaf { active, .. } => {
                if *count == target { *active = new_active; }
                *count += 1;
            }
            DockNode::Split { children, .. } => {
                Self::set_active_nth(&mut children[0], target, new_active, count);
                Self::set_active_nth(&mut children[1], target, new_active, count);
            }
        }
    }

    /// Lay the tree out into `rect`, caching geometry for input handling,
    /// and return (active panel id, content rect) per leaf.
    pub fn layout(&mut self, rect: Rect) -> Vec<(u32, Rect)> {
        self.cached_leaves.clear();
        self.cached_splits.clear();
        let mut path = Vec::new();
        let mut counter = 0;
        layout_recursive(
            &self.root,
            rect,
            &mut path,
            &mut counter,
            &mut self.cached_leaves,
            &mut self.cached_splits,
        );
        self.cached_leaves
            .iter()
            .map(|l| {
                // Re-clamp defensively in case `active` went stale.
                let active = l.active.min(l.tabs.len().saturating_sub(1));
                (l.tabs[active], l.content_rect)
            })
            .collect()
    }

    /// Draw tab bars and split dividers using the cached layout.
    /// Call after `layout` in the same frame.
    pub fn draw_chrome(&self, ui: &mut UiContext) {
        let glyph_w = ui.font.glyph_width as f32;
        let glyph_h = ui.font.glyph_height as f32;

        const COLOR_TAB_ACTIVE: [u8; 4] = [60, 60, 60, 255];
        const COLOR_TAB_INACTIVE: [u8; 4] = [40, 40, 40, 255];
        const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
        const COLOR_SPLIT: [u8; 4] = [30, 30, 30, 255];
        const COLOR_SPLIT_ACTIVE: [u8; 4] = [100, 100, 200, 255];
        const COLOR_SEPARATOR: [u8; 4] = [50, 50, 50, 255];

        // Draw tab bars for each leaf
        for leaf in &self.cached_leaves {
            let bar = &leaf.tab_bar_rect;
            let active = leaf.active.min(leaf.tabs.len().saturating_sub(1));
            let mut tx = bar.x;
            for (i, &panel_id) in leaf.tabs.iter().enumerate() {
                let name = self.names.get(panel_id as usize).copied().unwrap_or("?");
                let tab_w = name.len() as f32 * glyph_w + TAB_PADDING;
                let bg = if i == active { COLOR_TAB_ACTIVE } else { COLOR_TAB_INACTIVE };
                ui.draw_list.add_rect(tx, bar.y, tab_w, bar.h, bg);
                // Inline text rendering (avoids borrow conflict with ui.font + ui.draw_list)
                let text_x = tx + TAB_PADDING * 0.5;
                let text_y = bar.y + (bar.h - glyph_h) * 0.5;
                let mut cx = text_x;
                for ch in name.chars() {
                    let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
                    ui.draw_list.add_rect_uv(cx, text_y, glyph_w, glyph_h, u0, v0, u1, v1, COLOR_TEXT);
                    cx += glyph_w;
                }
                tx += tab_w;
            }
            // Tab bar bottom separator
            ui.draw_list.add_rect(bar.x, bar.y + bar.h - 1.0, bar.w, 1.0, COLOR_SEPARATOR);
        }

        // Draw split lines
        for split in &self.cached_splits {
            // Highlight the divider currently being dragged.
            let color = if self.resizing.as_ref().map(|r| &r.path) == Some(&split.path) {
                COLOR_SPLIT_ACTIVE
            } else {
                COLOR_SPLIT
            };
            match split.axis {
                Axis::Horizontal => {
                    ui.draw_list.add_rect(split.boundary - 0.5, split.rect.y, 1.0, split.rect.h, color);
                }
                Axis::Vertical => {
                    ui.draw_list.add_rect(split.rect.x, split.boundary - 0.5, split.rect.w, 1.0, color);
                }
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Layout, hit-testing (tabs + resize) and chrome-drawing tests.
    use super::*;
    use crate::ui_context::UiContext;

    #[test]
    fn test_rect_contains() {
        let r = Rect { x: 10.0, y: 20.0, w: 100.0, h: 50.0 };
        assert!(r.contains(50.0, 40.0));
        assert!(!r.contains(5.0, 40.0));
        assert!(!r.contains(50.0, 80.0));
    }

    #[test]
    fn test_layout_single_leaf() {
        // Content rect sits TAB_BAR_HEIGHT (20 px) below the leaf top.
        let mut dock = DockTree::new(DockNode::leaf(vec![0]), vec!["Panel0"]);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 1);
        assert_eq!(areas[0].0, 0);
        let r = &areas[0].1;
        assert!((r.y - 20.0).abs() < 1e-3);
        assert!((r.h - 280.0).abs() < 1e-3);
    }

    #[test]
    fn test_layout_horizontal_split() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.25, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["Left", "Right"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 2);
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 100.0).abs() < 1e-3);
        let right = areas.iter().find(|(id, _)| *id == 1).unwrap();
        assert!((right.1.w - 300.0).abs() < 1e-3);
    }

    #[test]
    fn test_layout_vertical_split() {
        // Top child: 300 * 0.5 = 150 px minus the 20 px tab bar = 130.
        let mut dock = DockTree::new(
            DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["Top", "Bottom"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let top = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((top.1.h - 130.0).abs() < 1e-3);
    }

    #[test]
    fn test_layout_nested_split() {
        let mut dock = DockTree::new(
            DockNode::split(
                Axis::Horizontal,
                0.25,
                DockNode::leaf(vec![0]),
                DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![1]), DockNode::leaf(vec![2])),
            ),
            vec!["A", "B", "C"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 3);
    }

    #[test]
    fn test_layout_active_tab_only() {
        // Only the active tab's panel id is returned for a leaf.
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1, 2], active: 1 },
            vec!["A", "B", "C"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 1);
    }

    #[test]
    fn test_active_clamped_if_out_of_bounds() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0], active: 5 },
            vec!["A"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 0);
    }

    #[test]
    #[should_panic]
    fn test_empty_tabs_panics() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![], active: 0 },
            vec![],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
    }

    #[test]
    fn test_tab_click_switches_active() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1, 2], active: 0 },
            vec!["A", "B", "C"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        // Tab "A": w = 1*8+8 = 16, tab "B" starts at x=16. Click at x=20 (inside "B").
        dock.update(20.0, 10.0, true);
        dock.update(20.0, 10.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 1);
    }

    #[test]
    fn test_tab_click_no_change_on_single_tab() {
        let mut dock = DockTree::new(DockNode::leaf(vec![0]), vec!["Only"]);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(10.0, 10.0, true);
        dock.update(10.0, 10.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 0);
    }

    #[test]
    fn test_resize_horizontal_drag() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["L", "R"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 150.0, true); // click on boundary at x=200
        dock.update(120.0, 150.0, true); // drag to x=120
        dock.update(120.0, 150.0, false); // release
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 120.0).abs() < 5.0, "left w={}", left.1.w);
    }

    #[test]
    fn test_resize_clamps_ratio() {
        // Dragging to x=5 would be ratio 0.0125; MIN_RATIO clamps to 0.1 → 40 px.
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["L", "R"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 150.0, true);
        dock.update(5.0, 150.0, true);
        dock.update(5.0, 150.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 40.0).abs() < 1e-3, "left w={}", left.1.w);
    }

    #[test]
    fn test_resize_priority_over_tab_click() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::leaf(vec![2]),
            ),
            vec!["A", "B", "C"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 10.0, true); // click at boundary within tab bar
        dock.update(180.0, 10.0, true); // drag
        dock.update(180.0, 10.0, false); // release
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0 || *id == 1).unwrap();
        assert!((left.1.w - 180.0).abs() < 5.0, "resize should have priority, w={}", left.1.w);
    }

    #[test]
    fn test_draw_chrome_produces_draw_commands() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::leaf(vec![2]),
            ),
            vec!["A", "B", "C"],
        );
        let mut ui = UiContext::new(800.0, 600.0);
        ui.begin_frame(0.0, 0.0, false);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 800.0, h: 600.0 });
        dock.draw_chrome(&mut ui);
        assert!(ui.draw_list.commands.len() >= 5);
    }

    #[test]
    fn test_draw_chrome_active_tab_color() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1], active: 1 },
            vec!["AA", "BB"],
        );
        let mut ui = UiContext::new(800.0, 600.0);
        ui.begin_frame(0.0, 0.0, false);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.draw_chrome(&mut ui);
        // First tab bg (inactive "AA"): color [40, 40, 40, 255]
        let first_bg = &ui.draw_list.vertices[0];
        assert_eq!(first_bg.color, [40, 40, 40, 255]);
    }

    #[test]
    fn test_full_frame_cycle() {
        // Several frames of layout + update + draw must stay consistent.
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.3,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::split(Axis::Vertical, 0.6, DockNode::leaf(vec![2]), DockNode::leaf(vec![3])),
            ),
            vec!["Hierarchy", "Inspector", "Viewport", "Console"],
        );
        let mut ui = UiContext::new(1280.0, 720.0);
        for _ in 0..3 {
            ui.begin_frame(100.0, 100.0, false);
            let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 1280.0, h: 720.0 });
            dock.update(100.0, 100.0, false);
            dock.draw_chrome(&mut ui);
            assert_eq!(areas.len(), 3);
            for (_, r) in &areas {
                assert!(r.w > 0.0);
                assert!(r.h > 0.0);
            }
            ui.end_frame();
        }
    }
}
|
||||
197
crates/voltex_editor/src/glyph_cache.rs
Normal file
197
crates/voltex_editor/src/glyph_cache.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Side length in pixels of the default square R8 glyph atlas.
pub const ATLAS_SIZE: u32 = 1024;

/// Placement and metrics for one glyph cached in the atlas.
#[derive(Clone, Debug)]
pub struct GlyphInfo {
    /// Normalized atlas texture coordinates: u0, v0, u1, v1.
    pub uv: [f32; 4],
    /// Bitmap width in pixels.
    pub width: f32,
    /// Bitmap height in pixels.
    pub height: f32,
    /// Horizontal pen advance in pixels.
    pub advance: f32,
    /// Left side bearing in pixels.
    pub bearing_x: f32,
    /// Distance from the baseline up to the glyph top, in pixels.
    pub bearing_y: f32,
}

/// Shelf-packed single-channel (R8) glyph atlas.
///
/// Glyphs are packed left to right into rows; when a glyph does not fit
/// the current row, packing wraps to a new row below the tallest glyph in
/// the row. `dirty` is set whenever `atlas_data` changes so callers can
/// re-upload the GPU texture lazily.
pub struct GlyphCache {
    /// Row-major R8 alpha pixels, `atlas_width * atlas_height` bytes.
    pub atlas_data: Vec<u8>,
    pub atlas_width: u32,
    pub atlas_height: u32,
    glyphs: HashMap<char, GlyphInfo>,
    // Shelf-packing cursor: next free position and current row height.
    cursor_x: u32,
    cursor_y: u32,
    row_height: u32,
    /// True when `atlas_data` changed since the last `clear_dirty`.
    pub dirty: bool,
}

impl GlyphCache {
    /// Create an empty, all-zero atlas of the given pixel dimensions.
    pub fn new(width: u32, height: u32) -> Self {
        GlyphCache {
            atlas_data: vec![0u8; (width * height) as usize],
            atlas_width: width,
            atlas_height: height,
            glyphs: HashMap::new(),
            cursor_x: 0,
            cursor_y: 0,
            row_height: 0,
            dirty: false,
        }
    }

    /// Look up a previously inserted glyph.
    pub fn get(&self, ch: char) -> Option<&GlyphInfo> {
        self.glyphs.get(&ch)
    }

    /// Cache a glyph that occupies no atlas space (zero UV rectangle):
    /// zero-size glyphs (space) and glyphs the atlas cannot hold. Does not
    /// touch `atlas_data`, so `dirty` is left unchanged.
    fn insert_placeholder(
        &mut self,
        ch: char,
        width: f32,
        height: f32,
        advance: f32,
        bearing_x: f32,
        bearing_y: f32,
    ) -> &GlyphInfo {
        self.glyphs.insert(ch, GlyphInfo {
            uv: [0.0, 0.0, 0.0, 0.0],
            width,
            height,
            advance,
            bearing_x,
            bearing_y,
        });
        self.glyphs.get(&ch).unwrap()
    }

    /// Insert a rasterized R8 glyph bitmap into the atlas and cache its
    /// metrics. Returns a reference to the cached `GlyphInfo`.
    ///
    /// Zero-sized glyphs, glyphs wider than the whole atlas, and glyphs
    /// that no longer fit vertically are cached with a zero UV rectangle
    /// so they render as empty instead of corrupting the atlas.
    pub fn insert(
        &mut self,
        ch: char,
        bitmap: &[u8],
        bmp_w: u32,
        bmp_h: u32,
        advance: f32,
        bearing_x: f32,
        bearing_y: f32,
    ) -> &GlyphInfo {
        // Zero-size glyphs (e.g. space) carry metrics only.
        if bmp_w == 0 || bmp_h == 0 {
            return self.insert_placeholder(ch, 0.0, 0.0, advance, bearing_x, bearing_y);
        }

        // BUGFIX: a glyph wider than the atlas can never be packed; the
        // old code wrapped anyway and the per-row copy (guarded only
        // against the flat buffer length) smeared its rows across atlas
        // row boundaries. Cache it as an empty rectangle instead.
        if bmp_w > self.atlas_width {
            return self.insert_placeholder(
                ch, bmp_w as f32, bmp_h as f32, advance, bearing_x, bearing_y,
            );
        }

        // Wrap to the next shelf row when the current one is out of width.
        if self.cursor_x + bmp_w > self.atlas_width {
            self.cursor_y += self.row_height + 1; // +1 pixel gap between rows
            self.cursor_x = 0;
            self.row_height = 0;
        }

        // Atlas full: no vertical space left for this glyph.
        if self.cursor_y + bmp_h > self.atlas_height {
            return self.insert_placeholder(
                ch, bmp_w as f32, bmp_h as f32, advance, bearing_x, bearing_y,
            );
        }

        // Blit the bitmap into the atlas row by row. With the width checks
        // above, each destination run stays inside its atlas row.
        for row in 0..bmp_h {
            let src_start = (row * bmp_w) as usize;
            let src_end = src_start + bmp_w as usize;
            let dst_start = ((self.cursor_y + row) * self.atlas_width + self.cursor_x) as usize;
            let dst_end = dst_start + bmp_w as usize;
            if src_end <= bitmap.len() && dst_end <= self.atlas_data.len() {
                self.atlas_data[dst_start..dst_end]
                    .copy_from_slice(&bitmap[src_start..src_end]);
            }
        }

        // Normalized UV rectangle of the blitted region.
        let u0 = self.cursor_x as f32 / self.atlas_width as f32;
        let v0 = self.cursor_y as f32 / self.atlas_height as f32;
        let u1 = (self.cursor_x + bmp_w) as f32 / self.atlas_width as f32;
        let v1 = (self.cursor_y + bmp_h) as f32 / self.atlas_height as f32;

        let info = GlyphInfo {
            uv: [u0, v0, u1, v1],
            width: bmp_w as f32,
            height: bmp_h as f32,
            advance,
            bearing_x,
            bearing_y,
        };

        // Advance the packing cursor past this glyph (+1 pixel gap).
        self.cursor_x += bmp_w + 1;
        self.row_height = self.row_height.max(bmp_h);

        self.dirty = true;
        self.glyphs.insert(ch, info);
        self.glyphs.get(&ch).unwrap()
    }

    /// Acknowledge that the atlas has been uploaded (resets `dirty`).
    pub fn clear_dirty(&mut self) {
        self.dirty = false;
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_insert_and_get() {
        let mut cache = GlyphCache::new(256, 256);
        let pixels = vec![255u8; 10 * 12]; // 10x12 glyph
        cache.insert('A', &pixels, 10, 12, 11.0, 0.5, 10.0);

        let info = cache.get('A');
        assert!(info.is_some());
        let info = info.unwrap();
        assert!((info.width - 10.0).abs() < 0.1);
        assert!((info.advance - 11.0).abs() < 0.1);
        assert!(info.uv[0] >= 0.0 && info.uv[2] <= 1.0);
    }

    #[test]
    fn test_cache_hit() {
        let mut cache = GlyphCache::new(256, 256);
        let pixels = vec![255u8; 8 * 8];
        cache.insert('B', &pixels, 8, 8, 9.0, 0.0, 8.0);
        cache.clear_dirty();

        // A lookup alone must not mark the atlas dirty again.
        assert!(cache.get('B').is_some());
        assert!(!cache.dirty);
    }

    #[test]
    fn test_row_wrap() {
        // 64px atlas, 20px glyphs: three fit per row (20+1 + 20+1 + 20 = 62),
        // the fourth wraps to a new row.
        let mut cache = GlyphCache::new(64, 64);
        let pixels = vec![255u8; 20 * 10];
        for ch in ['A', 'B', 'C', 'D'] {
            cache.insert(ch, &pixels, 20, 10, 21.0, 0.0, 10.0);
        }

        let d = cache.get('D').unwrap();
        assert!(d.uv[1] > 0.0, "D should be on second row, v0={}", d.uv[1]);
    }

    #[test]
    fn test_uv_range() {
        let mut cache = GlyphCache::new(256, 256);
        let pixels = vec![128u8; 15 * 20];
        cache.insert('X', &pixels, 15, 20, 16.0, 1.0, 18.0);

        let info = cache.get('X').unwrap();
        let [u0, v0, u1, v1] = info.uv;
        assert!((0.0..1.0).contains(&u0));
        assert!((0.0..1.0).contains(&v0));
        assert!(u1 > u0 && u1 <= 1.0);
        assert!(v1 > v0 && v1 <= 1.0);
    }

    #[test]
    fn test_zero_size_glyph() {
        let mut cache = GlyphCache::new(256, 256);
        cache.insert(' ', &[], 0, 0, 5.0, 0.0, 0.0);

        let space = cache.get(' ').unwrap();
        assert!((space.advance - 5.0).abs() < 0.1);
        assert!(space.width.abs() < 0.1);
    }
}
|
||||
307
crates/voltex_editor/src/inspector.rs
Normal file
307
crates/voltex_editor/src/inspector.rs
Normal file
@@ -0,0 +1,307 @@
|
||||
use crate::ui_context::UiContext;
|
||||
use crate::dock::Rect;
|
||||
use crate::layout::LayoutState;
|
||||
use voltex_ecs::world::World;
|
||||
use voltex_ecs::entity::Entity;
|
||||
use voltex_ecs::scene::Tag;
|
||||
use voltex_ecs::hierarchy::{roots, Children, Parent};
|
||||
use voltex_ecs::transform::Transform;
|
||||
|
||||
// Hierarchy/inspector panel styling.
const COLOR_SELECTED: [u8; 4] = [0x44, 0x66, 0x88, 0xFF]; // selected-row highlight
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF]; // label text
const LINE_HEIGHT: f32 = 16.0; // height of one tree/inspector row, px
const INDENT: f32 = 16.0; // horizontal indent per hierarchy depth level, px
const PADDING: f32 = 4.0; // inner padding from the panel rect edge, px
|
||||
|
||||
/// Count total nodes in subtree (including self).
|
||||
pub fn count_entity_nodes(world: &World, entity: Entity) -> usize {
|
||||
let mut count = 1;
|
||||
if let Some(children) = world.get::<Children>(entity) {
|
||||
for &child in &children.0 {
|
||||
count += count_entity_nodes(world, child);
|
||||
}
|
||||
}
|
||||
count
|
||||
}
|
||||
|
||||
/// Draw a single entity row, then recurse into children.
|
||||
fn draw_entity_node(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
entity: Entity,
|
||||
depth: usize,
|
||||
selected: &mut Option<Entity>,
|
||||
base_x: f32,
|
||||
row_w: f32,
|
||||
) {
|
||||
let y = ui.layout.cursor_y;
|
||||
let x = base_x + PADDING + depth as f32 * INDENT;
|
||||
|
||||
// Highlight selected
|
||||
if *selected == Some(entity) {
|
||||
ui.draw_list.add_rect(base_x, y, row_w, LINE_HEIGHT, COLOR_SELECTED);
|
||||
}
|
||||
|
||||
// Click detection
|
||||
if ui.mouse_clicked && ui.mouse_in_rect(base_x, y, row_w, LINE_HEIGHT) {
|
||||
*selected = Some(entity);
|
||||
}
|
||||
|
||||
// Build label
|
||||
let has_children = world.get::<Children>(entity).map_or(false, |c| !c.0.is_empty());
|
||||
let prefix = if has_children { "> " } else { " " };
|
||||
let name = if let Some(tag) = world.get::<Tag>(entity) {
|
||||
format!("{}{}", prefix, tag.0)
|
||||
} else {
|
||||
format!("{}Entity({})", prefix, entity.id)
|
||||
};
|
||||
|
||||
// Draw text (inline glyph rendering)
|
||||
let gw = ui.font.glyph_width as f32;
|
||||
let gh = ui.font.glyph_height as f32;
|
||||
let text_y = y + (LINE_HEIGHT - gh) * 0.5;
|
||||
let mut cx = x;
|
||||
for ch in name.chars() {
|
||||
let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
|
||||
ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
|
||||
cx += gw;
|
||||
}
|
||||
|
||||
ui.layout.cursor_y += LINE_HEIGHT;
|
||||
|
||||
// Recurse children
|
||||
if let Some(children) = world.get::<Children>(entity) {
|
||||
let child_list: Vec<Entity> = children.0.clone();
|
||||
for child in child_list {
|
||||
draw_entity_node(ui, world, child, depth + 1, selected, base_x, row_w);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Hierarchy panel: displays entity tree, handles selection.
|
||||
pub fn hierarchy_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
selected: &mut Option<Entity>,
|
||||
rect: &Rect,
|
||||
) {
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
let root_entities = roots(world);
|
||||
for &entity in &root_entities {
|
||||
draw_entity_node(ui, world, entity, 0, selected, rect.x, rect.w);
|
||||
}
|
||||
}
|
||||
|
||||
/// Inspector panel: edit Transform, Tag, Parent for selected entity.
|
||||
/// `tag_buffer` is caller-owned staging buffer for Tag text input.
|
||||
pub fn inspector_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &mut World,
|
||||
selected: Option<Entity>,
|
||||
rect: &Rect,
|
||||
tag_buffer: &mut String,
|
||||
) {
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
|
||||
let entity = match selected {
|
||||
Some(e) => e,
|
||||
None => {
|
||||
ui.text("No entity selected");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// --- Transform ---
|
||||
if world.has_component::<Transform>(entity) {
|
||||
ui.text("-- Transform --");
|
||||
|
||||
// Copy out values (immutable borrow ends with block)
|
||||
let (px, py, pz, rx, ry, rz, sx, sy, sz) = {
|
||||
let t = world.get::<Transform>(entity).unwrap();
|
||||
(t.position.x, t.position.y, t.position.z,
|
||||
t.rotation.x, t.rotation.y, t.rotation.z,
|
||||
t.scale.x, t.scale.y, t.scale.z)
|
||||
};
|
||||
|
||||
// Sliders (no world borrow active)
|
||||
let new_px = ui.slider("Pos X", px, -50.0, 50.0);
|
||||
let new_py = ui.slider("Pos Y", py, -50.0, 50.0);
|
||||
let new_pz = ui.slider("Pos Z", pz, -50.0, 50.0);
|
||||
let new_rx = ui.slider("Rot X", rx, -3.15, 3.15);
|
||||
let new_ry = ui.slider("Rot Y", ry, -3.15, 3.15);
|
||||
let new_rz = ui.slider("Rot Z", rz, -3.15, 3.15);
|
||||
let new_sx = ui.slider("Scl X", sx, 0.01, 10.0);
|
||||
let new_sy = ui.slider("Scl Y", sy, 0.01, 10.0);
|
||||
let new_sz = ui.slider("Scl Z", sz, 0.01, 10.0);
|
||||
|
||||
// Write back (mutable borrow)
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position.x = new_px;
|
||||
t.position.y = new_py;
|
||||
t.position.z = new_pz;
|
||||
t.rotation.x = new_rx;
|
||||
t.rotation.y = new_ry;
|
||||
t.rotation.z = new_rz;
|
||||
t.scale.x = new_sx;
|
||||
t.scale.y = new_sy;
|
||||
t.scale.z = new_sz;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Tag ---
|
||||
if world.has_component::<Tag>(entity) {
|
||||
ui.text("-- Tag --");
|
||||
|
||||
// Sync buffer from world
|
||||
if let Some(tag) = world.get::<Tag>(entity) {
|
||||
if tag_buffer.is_empty() || *tag_buffer != tag.0 {
|
||||
*tag_buffer = tag.0.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let input_x = rect.x + PADDING;
|
||||
let input_y = ui.layout.cursor_y;
|
||||
let input_w = (rect.w - PADDING * 2.0).max(50.0);
|
||||
if ui.text_input(8888, tag_buffer, input_x, input_y, input_w) {
|
||||
if let Some(tag) = world.get_mut::<Tag>(entity) {
|
||||
tag.0 = tag_buffer.clone();
|
||||
}
|
||||
}
|
||||
ui.layout.advance_line();
|
||||
}
|
||||
|
||||
// --- Parent ---
|
||||
if let Some(parent) = world.get::<Parent>(entity) {
|
||||
ui.text("-- Parent --");
|
||||
let parent_text = format!("Parent: Entity({})", parent.0.id);
|
||||
ui.text(&parent_text);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_ecs::transform::Transform;
    use voltex_math::Vec3;

    /// One tagged root with two tagged children, all with transforms.
    fn make_test_world() -> (World, Entity, Entity, Entity) {
        let mut world = World::new();

        let root = world.spawn();
        world.add(root, Transform::from_position(Vec3::new(0.0, 0.0, 0.0)));
        world.add(root, Tag("Root".to_string()));

        let child_a = world.spawn();
        world.add(child_a, Transform::from_position(Vec3::new(1.0, 0.0, 0.0)));
        world.add(child_a, Tag("Child1".to_string()));

        let child_b = world.spawn();
        world.add(child_b, Transform::from_position(Vec3::new(2.0, 0.0, 0.0)));
        world.add(child_b, Tag("Child2".to_string()));

        voltex_ecs::hierarchy::add_child(&mut world, root, child_a);
        voltex_ecs::hierarchy::add_child(&mut world, root, child_b);

        (world, root, child_a, child_b)
    }

    #[test]
    fn test_count_nodes() {
        let (world, root, _, _) = make_test_world();
        assert_eq!(count_entity_nodes(&world, root), 3);
    }

    #[test]
    fn test_hierarchy_draws_commands() {
        let (world, _, _, _) = make_test_world();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let panel = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };

        ui.begin_frame(0.0, 0.0, false);
        hierarchy_panel(&mut ui, &world, &mut selected, &panel);
        ui.end_frame();

        assert!(!ui.draw_list.commands.is_empty());
    }

    #[test]
    fn test_hierarchy_click_selects() {
        let (world, root, _, _) = make_test_world();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let panel = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };

        // Click lands inside the first row.
        ui.begin_frame(50.0, PADDING + 2.0, true);
        hierarchy_panel(&mut ui, &world, &mut selected, &panel);
        ui.end_frame();

        assert_eq!(selected, Some(root));
    }

    #[test]
    fn test_hierarchy_empty_world() {
        let world = World::new();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let panel = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };

        ui.begin_frame(0.0, 0.0, false);
        hierarchy_panel(&mut ui, &world, &mut selected, &panel);
        ui.end_frame();

        assert!(selected.is_none());
    }

    #[test]
    fn test_inspector_no_selection() {
        let mut world = World::new();
        let mut ui = UiContext::new(800.0, 600.0);
        let panel = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        let mut tag_buf = String::new();

        ui.begin_frame(0.0, 0.0, false);
        inspector_panel(&mut ui, &mut world, None, &panel, &mut tag_buf);
        ui.end_frame();

        // The "No entity selected" text still produces draw commands.
        assert!(!ui.draw_list.commands.is_empty());
    }

    #[test]
    fn test_inspector_with_transform() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));

        let mut ui = UiContext::new(800.0, 600.0);
        let panel = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        let mut tag_buf = String::new();

        ui.begin_frame(0.0, 0.0, false);
        inspector_panel(&mut ui, &mut world, Some(e), &panel, &mut tag_buf);
        ui.end_frame();

        // Header plus nine sliders produce many draw commands.
        assert!(ui.draw_list.commands.len() > 10);
    }

    #[test]
    fn test_inspector_with_tag() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::new());
        world.add(e, Tag("TestTag".to_string()));

        let mut ui = UiContext::new(800.0, 600.0);
        let panel = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        let mut tag_buf = String::new();

        ui.begin_frame(0.0, 0.0, false);
        inspector_panel(&mut ui, &mut world, Some(e), &panel, &mut tag_buf);
        ui.end_frame();

        // The staging buffer is synced from the world's Tag.
        assert_eq!(tag_buf, "TestTag");
    }
}
|
||||
@@ -4,9 +4,30 @@ pub mod layout;
|
||||
pub mod renderer;
|
||||
pub mod ui_context;
|
||||
pub mod widgets;
|
||||
pub mod dock;
|
||||
pub mod inspector;
|
||||
pub mod orbit_camera;
|
||||
|
||||
pub use font::FontAtlas;
|
||||
pub use draw_list::{DrawVertex, DrawCommand, DrawList};
|
||||
pub use layout::LayoutState;
|
||||
pub use renderer::UiRenderer;
|
||||
pub use ui_context::UiContext;
|
||||
pub use dock::{DockTree, DockNode, Axis, Rect, LeafLayout};
|
||||
pub use inspector::{hierarchy_panel, inspector_panel};
|
||||
pub use orbit_camera::OrbitCamera;
|
||||
|
||||
pub mod viewport_texture;
|
||||
pub use viewport_texture::{ViewportTexture, VIEWPORT_COLOR_FORMAT, VIEWPORT_DEPTH_FORMAT};
|
||||
|
||||
pub mod viewport_renderer;
|
||||
pub use viewport_renderer::ViewportRenderer;
|
||||
|
||||
pub mod asset_browser;
|
||||
pub use asset_browser::{AssetBrowser, asset_browser_panel};
|
||||
|
||||
pub mod ttf_parser;
|
||||
pub mod rasterizer;
|
||||
pub mod glyph_cache;
|
||||
pub mod ttf_font;
|
||||
pub use ttf_font::TtfFont;
|
||||
|
||||
166
crates/voltex_editor/src/orbit_camera.rs
Normal file
166
crates/voltex_editor/src/orbit_camera.rs
Normal file
@@ -0,0 +1,166 @@
|
||||
use voltex_math::{Vec3, Mat4};
|
||||
use std::f32::consts::PI;
|
||||
|
||||
const PITCH_LIMIT: f32 = PI / 2.0 - 0.01;
|
||||
const MIN_DISTANCE: f32 = 0.5;
|
||||
const MAX_DISTANCE: f32 = 50.0;
|
||||
const ORBIT_SENSITIVITY: f32 = 0.005;
|
||||
const ZOOM_FACTOR: f32 = 0.1;
|
||||
const PAN_SENSITIVITY: f32 = 0.01;
|
||||
|
||||
pub struct OrbitCamera {
|
||||
pub target: Vec3,
|
||||
pub distance: f32,
|
||||
pub yaw: f32,
|
||||
pub pitch: f32,
|
||||
pub fov_y: f32,
|
||||
pub near: f32,
|
||||
pub far: f32,
|
||||
}
|
||||
|
||||
impl OrbitCamera {
|
||||
pub fn new() -> Self {
|
||||
OrbitCamera {
|
||||
target: Vec3::ZERO,
|
||||
distance: 5.0,
|
||||
yaw: 0.0,
|
||||
pitch: 0.3,
|
||||
fov_y: PI / 4.0,
|
||||
near: 0.1,
|
||||
far: 100.0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn position(&self) -> Vec3 {
|
||||
let cp = self.pitch.cos();
|
||||
let sp = self.pitch.sin();
|
||||
let cy = self.yaw.cos();
|
||||
let sy = self.yaw.sin();
|
||||
Vec3::new(
|
||||
self.target.x + self.distance * cp * sy,
|
||||
self.target.y + self.distance * sp,
|
||||
self.target.z + self.distance * cp * cy,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn orbit(&mut self, dx: f32, dy: f32) {
|
||||
self.yaw += dx * ORBIT_SENSITIVITY;
|
||||
self.pitch += dy * ORBIT_SENSITIVITY;
|
||||
self.pitch = self.pitch.clamp(-PITCH_LIMIT, PITCH_LIMIT);
|
||||
}
|
||||
|
||||
pub fn zoom(&mut self, delta: f32) {
|
||||
self.distance *= 1.0 - delta * ZOOM_FACTOR;
|
||||
self.distance = self.distance.clamp(MIN_DISTANCE, MAX_DISTANCE);
|
||||
}
|
||||
|
||||
pub fn pan(&mut self, dx: f32, dy: f32) {
|
||||
let forward = (self.target - self.position()).normalize();
|
||||
let right = forward.cross(Vec3::Y);
|
||||
let right = if right.length() < 1e-4 { Vec3::X } else { right.normalize() };
|
||||
let up = right.cross(forward).normalize();
|
||||
let offset_x = right * (-dx * PAN_SENSITIVITY * self.distance);
|
||||
let offset_y = up * (dy * PAN_SENSITIVITY * self.distance);
|
||||
self.target = self.target + offset_x + offset_y;
|
||||
}
|
||||
|
||||
pub fn view_matrix(&self) -> Mat4 {
|
||||
Mat4::look_at(self.position(), self.target, Vec3::Y)
|
||||
}
|
||||
|
||||
pub fn projection_matrix(&self, aspect: f32) -> Mat4 {
|
||||
Mat4::perspective(self.fov_y, aspect, self.near, self.far)
|
||||
}
|
||||
|
||||
pub fn view_projection(&self, aspect: f32) -> Mat4 {
|
||||
self.projection_matrix(aspect).mul_mat4(&self.view_matrix())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_position() {
        let cam = OrbitCamera::new();
        let eye = cam.position();
        // yaw = 0 puts the eye on the +Z side; pitch > 0 lifts it up.
        assert!(eye.x.abs() < 1e-3);
        assert!(eye.y > 0.0);
        assert!(eye.z > 0.0);
    }

    #[test]
    fn test_orbit_changes_yaw_pitch() {
        let mut cam = OrbitCamera::new();
        let (yaw0, pitch0) = (cam.yaw, cam.pitch);
        cam.orbit(100.0, 50.0);
        assert!((cam.yaw - yaw0 - 100.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
        assert!((cam.pitch - pitch0 - 50.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
    }

    #[test]
    fn test_pitch_clamped() {
        let mut cam = OrbitCamera::new();
        cam.orbit(0.0, 100000.0);
        assert!(cam.pitch <= PITCH_LIMIT);
        cam.orbit(0.0, -200000.0);
        assert!(cam.pitch >= -PITCH_LIMIT);
    }

    #[test]
    fn test_zoom_changes_distance() {
        let mut cam = OrbitCamera::new();
        let before = cam.distance;
        cam.zoom(1.0);
        assert!(cam.distance < before);
    }

    #[test]
    fn test_zoom_clamped() {
        let mut cam = OrbitCamera::new();
        cam.zoom(1000.0);
        assert!(cam.distance >= MIN_DISTANCE);
        cam.zoom(-10000.0);
        assert!(cam.distance <= MAX_DISTANCE);
    }

    #[test]
    fn test_pan_moves_target() {
        let mut cam = OrbitCamera::new();
        let before = cam.target;
        cam.pan(10.0, 0.0);
        let moved = (cam.target.x - before.x).abs() > 1e-4
            || (cam.target.z - before.z).abs() > 1e-4;
        assert!(moved);
    }

    #[test]
    fn test_view_matrix_not_zero() {
        let cam = OrbitCamera::new();
        let view = cam.view_matrix();
        let magnitude: f32 = view.cols.iter().flat_map(|c| c.iter()).map(|x| x.abs()).sum();
        assert!(magnitude > 1.0);
    }

    #[test]
    fn test_projection_matrix() {
        let cam = OrbitCamera::new();
        let proj = cam.projection_matrix(16.0 / 9.0);
        assert!(proj.cols[0][0] > 0.0);
    }

    #[test]
    fn test_view_projection() {
        let cam = OrbitCamera::new();
        let vp = cam.view_projection(1.0);
        let expected = cam.projection_matrix(1.0).mul_mat4(&cam.view_matrix());
        for i in 0..4 {
            for j in 0..4 {
                assert!((vp.cols[i][j] - expected.cols[i][j]).abs() < 1e-4,
                    "mismatch at [{i}][{j}]: {} vs {}", vp.cols[i][j], expected.cols[i][j]);
            }
        }
    }
}
|
||||
220
crates/voltex_editor/src/rasterizer.rs
Normal file
220
crates/voltex_editor/src/rasterizer.rs
Normal file
@@ -0,0 +1,220 @@
|
||||
use crate::ttf_parser::GlyphOutline;
|
||||
|
||||
/// Output of rasterizing one glyph outline.
pub struct RasterResult {
    /// Bitmap width in pixels.
    pub width: u32,
    /// Bitmap height in pixels.
    pub height: u32,
    /// R8 alpha coverage bitmap, row-major, `width * height` bytes.
    pub bitmap: Vec<u8>,
    /// Left bearing in pixels.
    pub offset_x: f32,
    /// Distance from the baseline to the top of the glyph bbox, in pixels.
    pub offset_y: f32,
}
|
||||
|
||||
/// Maximum recursive subdivisions per curve. Caps recursion so degenerate
/// or NaN control points (for which the flatness test is never true)
/// cannot overflow the stack; 2^16 segments is far beyond any visible need.
const MAX_QUAD_SUBDIV_DEPTH: u32 = 16;

/// Recursively flatten a quadratic bezier into line segments appended to
/// `edges` as `(x0, y0, x1, y1)` tuples.
///
/// The curve is emitted as a single segment once the midpoint deviates
/// from the chord by less than half a pixel, otherwise it is subdivided
/// at t = 0.5 (de Casteljau).
pub fn flatten_quad(
    x0: f32, y0: f32,
    cx: f32, cy: f32,
    x1: f32, y1: f32,
    edges: &mut Vec<(f32, f32, f32, f32)>,
) {
    flatten_quad_rec(x0, y0, cx, cy, x1, y1, edges, 0);
}

/// Depth-tracked worker for `flatten_quad`.
fn flatten_quad_rec(
    x0: f32, y0: f32,
    cx: f32, cy: f32,
    x1: f32, y1: f32,
    edges: &mut Vec<(f32, f32, f32, f32)>,
    depth: u32,
) {
    // Flatness test: distance between curve midpoint and chord midpoint.
    let mx = (x0 + 2.0 * cx + x1) / 4.0;
    let my = (y0 + 2.0 * cy + y1) / 4.0;
    let lx = (x0 + x1) / 2.0;
    let ly = (y0 + y1) / 2.0;
    let dx = mx - lx;
    let dy = my - ly;

    // Emit when flat enough (< 0.5px deviation) or when the depth cap is
    // hit — the latter guarantees termination even with NaN coordinates.
    if depth >= MAX_QUAD_SUBDIV_DEPTH || dx * dx + dy * dy < 0.25 {
        edges.push((x0, y0, x1, y1));
    } else {
        // Subdivide at t = 0.5.
        let m01x = (x0 + cx) / 2.0;
        let m01y = (y0 + cy) / 2.0;
        let m12x = (cx + x1) / 2.0;
        let m12y = (cy + y1) / 2.0;
        let midx = (m01x + m12x) / 2.0;
        let midy = (m01y + m12y) / 2.0;
        flatten_quad_rec(x0, y0, m01x, m01y, midx, midy, edges, depth + 1);
        flatten_quad_rec(midx, midy, m12x, m12y, x1, y1, edges, depth + 1);
    }
}
|
||||
|
||||
/// Rasterize a glyph outline at the given scale into an alpha bitmap.
|
||||
pub fn rasterize(outline: &GlyphOutline, scale: f32) -> RasterResult {
|
||||
// Handle empty outline (e.g., space)
|
||||
if outline.contours.is_empty() || outline.x_max <= outline.x_min {
|
||||
return RasterResult {
|
||||
width: 0,
|
||||
height: 0,
|
||||
bitmap: vec![],
|
||||
offset_x: 0.0,
|
||||
offset_y: 0.0,
|
||||
};
|
||||
}
|
||||
|
||||
let x_min = outline.x_min as f32;
|
||||
let y_min = outline.y_min as f32;
|
||||
let x_max = outline.x_max as f32;
|
||||
let y_max = outline.y_max as f32;
|
||||
|
||||
let w = ((x_max - x_min) * scale).ceil() as u32 + 2;
|
||||
let h = ((y_max - y_min) * scale).ceil() as u32 + 2;
|
||||
|
||||
// Transform a font-space coordinate to bitmap pixel space.
|
||||
let transform = |px: f32, py: f32| -> (f32, f32) {
|
||||
let bx = (px - x_min) * scale + 1.0;
|
||||
let by = (y_max - py) * scale + 1.0;
|
||||
(bx, by)
|
||||
};
|
||||
|
||||
// Build edges from contours
|
||||
let mut edges: Vec<(f32, f32, f32, f32)> = Vec::new();
|
||||
|
||||
for contour in &outline.contours {
|
||||
let n = contour.len();
|
||||
if n < 2 {
|
||||
continue;
|
||||
}
|
||||
let mut i = 0;
|
||||
while i < n {
|
||||
let p0 = &contour[i];
|
||||
let p1 = &contour[(i + 1) % n];
|
||||
if p0.on_curve && p1.on_curve {
|
||||
// Line segment
|
||||
let (px0, py0) = transform(p0.x, p0.y);
|
||||
let (px1, py1) = transform(p1.x, p1.y);
|
||||
edges.push((px0, py0, px1, py1));
|
||||
i += 1;
|
||||
} else if p0.on_curve && !p1.on_curve {
|
||||
// Quadratic bezier: p0 -> p1(control) -> p2(on_curve)
|
||||
let p2 = &contour[(i + 2) % n];
|
||||
let (px0, py0) = transform(p0.x, p0.y);
|
||||
let (cx, cy) = transform(p1.x, p1.y);
|
||||
let (px1, py1) = transform(p2.x, p2.y);
|
||||
flatten_quad(px0, py0, cx, cy, px1, py1, &mut edges);
|
||||
i += 2;
|
||||
} else {
|
||||
// Skip unexpected off-curve start
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Scanline fill (non-zero winding)
|
||||
let mut bitmap = vec![0u8; (w * h) as usize];
|
||||
for row in 0..h {
|
||||
let scan_y = row as f32 + 0.5;
|
||||
let mut intersections: Vec<(f32, i32)> = Vec::new();
|
||||
|
||||
for &(x0, y0, x1, y1) in &edges {
|
||||
if (y0 <= scan_y && y1 > scan_y) || (y1 <= scan_y && y0 > scan_y) {
|
||||
let t = (scan_y - y0) / (y1 - y0);
|
||||
let ix = x0 + t * (x1 - x0);
|
||||
let dir = if y1 > y0 { 1 } else { -1 };
|
||||
intersections.push((ix, dir));
|
||||
}
|
||||
}
|
||||
|
||||
intersections.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
|
||||
|
||||
// Fill using non-zero winding rule
|
||||
let mut winding = 0i32;
|
||||
let mut fill_start = 0.0f32;
|
||||
for &(x, dir) in &intersections {
|
||||
let old_winding = winding;
|
||||
winding += dir;
|
||||
if old_winding == 0 && winding != 0 {
|
||||
fill_start = x;
|
||||
}
|
||||
if old_winding != 0 && winding == 0 {
|
||||
let px_start = (fill_start.floor() as i32).max(0) as u32;
|
||||
let px_end = (x.ceil() as u32).min(w);
|
||||
for px in px_start..px_end {
|
||||
bitmap[(row * w + px) as usize] = 255;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let offset_x = x_min * scale;
|
||||
let offset_y = y_max * scale;
|
||||
|
||||
RasterResult {
|
||||
width: w,
|
||||
height: h,
|
||||
bitmap,
|
||||
offset_x,
|
||||
offset_y,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ttf_parser::TtfParser;

    /// Probe a few common system font locations across platforms.
    /// Returns None when no usable font exists so tests can skip instead
    /// of failing: the previous `.expect("no font")` callers panicked on
    /// any machine without the Windows fonts.
    fn load_test_font() -> Option<TtfParser> {
        let paths = [
            "C:/Windows/Fonts/arial.ttf",
            "C:/Windows/Fonts/consola.ttf",
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
            "/System/Library/Fonts/Supplemental/Arial.ttf",
        ];
        paths.iter().find_map(|p| {
            let data = std::fs::read(p).ok()?;
            TtfParser::parse(data).ok()
        })
    }

    #[test]
    fn test_rasterize_produces_bitmap() {
        // Skip (do not fail) when no system font is available.
        let Some(parser) = load_test_font() else { return; };
        let gid = parser.glyph_index(0x41); // 'A'
        let outline = parser.glyph_outline(gid).expect("no outline");
        let scale = 32.0 / parser.units_per_em as f32;
        let result = rasterize(&outline, scale);
        assert!(result.width > 0);
        assert!(result.height > 0);
        assert!(!result.bitmap.is_empty());
    }

    #[test]
    fn test_rasterize_has_filled_pixels() {
        let Some(parser) = load_test_font() else { return; };
        let gid = parser.glyph_index(0x41);
        let outline = parser.glyph_outline(gid).expect("no outline");
        let scale = 32.0 / parser.units_per_em as f32;
        let result = rasterize(&outline, scale);
        let filled = result.bitmap.iter().filter(|&&b| b > 0).count();
        assert!(filled > 0, "bitmap should have filled pixels");
    }

    #[test]
    fn test_rasterize_empty() {
        let Some(parser) = load_test_font() else { return; };
        let gid = parser.glyph_index(0x20); // space
        if let Some(o) = parser.glyph_outline(gid) {
            let scale = 32.0 / parser.units_per_em as f32;
            let result = rasterize(&o, scale);
            // Space should be empty or zero-sized.
            assert!(
                result.width == 0 || result.bitmap.iter().all(|&b| b == 0)
            );
        }
    }

    #[test]
    fn test_flatten_quad_produces_edges() {
        let mut edges = Vec::new();
        flatten_quad(0.0, 0.0, 5.0, 10.0, 10.0, 0.0, &mut edges);
        assert!(
            edges.len() >= 2,
            "bezier should flatten to multiple segments"
        );
    }
}
|
||||
153
crates/voltex_editor/src/ttf_font.rs
Normal file
153
crates/voltex_editor/src/ttf_font.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
use crate::ttf_parser::TtfParser;
|
||||
use crate::rasterizer::rasterize;
|
||||
use crate::glyph_cache::{GlyphCache, GlyphInfo, ATLAS_SIZE};
|
||||
|
||||
pub struct TtfFont {
|
||||
parser: TtfParser,
|
||||
cache: GlyphCache,
|
||||
pub font_size: f32,
|
||||
pub line_height: f32,
|
||||
pub ascender: f32,
|
||||
pub descender: f32,
|
||||
scale: f32,
|
||||
}
|
||||
|
||||
impl TtfFont {
|
||||
pub fn new(data: &[u8], font_size: f32) -> Result<Self, String> {
|
||||
let parser = TtfParser::parse(data.to_vec())?;
|
||||
let scale = font_size / parser.units_per_em as f32;
|
||||
let ascender = parser.ascender as f32 * scale;
|
||||
let descender = parser.descender as f32 * scale;
|
||||
let line_height = (parser.ascender - parser.descender + parser.line_gap) as f32 * scale;
|
||||
let cache = GlyphCache::new(ATLAS_SIZE, ATLAS_SIZE);
|
||||
|
||||
Ok(TtfFont {
|
||||
parser,
|
||||
cache,
|
||||
font_size,
|
||||
line_height,
|
||||
ascender,
|
||||
descender,
|
||||
scale,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get glyph info for a character, rasterizing on cache miss.
|
||||
pub fn glyph(&mut self, ch: char) -> &GlyphInfo {
|
||||
if self.cache.get(ch).is_some() {
|
||||
return self.cache.get(ch).unwrap();
|
||||
}
|
||||
|
||||
let glyph_id = self.parser.glyph_index(ch as u32);
|
||||
let metrics = self.parser.glyph_metrics(glyph_id);
|
||||
let advance = metrics.advance_width as f32 * self.scale;
|
||||
let bearing_x = metrics.left_side_bearing as f32 * self.scale;
|
||||
|
||||
let outline = self.parser.glyph_outline(glyph_id);
|
||||
|
||||
match outline {
|
||||
Some(ref o) if !o.contours.is_empty() => {
|
||||
let result = rasterize(o, self.scale);
|
||||
// bearing_y = y_max * scale (distance above baseline)
|
||||
// When rendering: glyph_y = baseline_y - bearing_y
|
||||
// For our UI: text_y is the top of the line, baseline = text_y + ascender
|
||||
// So: glyph_y = text_y + ascender - bearing_y
|
||||
let bearing_y = o.y_max as f32 * self.scale;
|
||||
|
||||
self.cache.insert(
|
||||
ch,
|
||||
&result.bitmap,
|
||||
result.width,
|
||||
result.height,
|
||||
advance,
|
||||
bearing_x,
|
||||
bearing_y,
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
// No outline (space, etc.) — insert empty glyph
|
||||
self.cache.insert(ch, &[], 0, 0, advance, 0.0, 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
self.cache.get(ch).unwrap()
|
||||
}
|
||||
|
||||
/// Calculate total width of a text string.
|
||||
pub fn text_width(&mut self, text: &str) -> f32 {
|
||||
let mut width = 0.0;
|
||||
for ch in text.chars() {
|
||||
let info = self.glyph(ch);
|
||||
width += info.advance;
|
||||
}
|
||||
width
|
||||
}
|
||||
|
||||
pub fn atlas_data(&self) -> &[u8] {
|
||||
&self.cache.atlas_data
|
||||
}
|
||||
|
||||
pub fn atlas_size(&self) -> (u32, u32) {
|
||||
(self.cache.atlas_width, self.cache.atlas_height)
|
||||
}
|
||||
|
||||
pub fn is_dirty(&self) -> bool {
|
||||
self.cache.dirty
|
||||
}
|
||||
|
||||
pub fn clear_dirty(&mut self) {
|
||||
self.cache.clear_dirty();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Returns the first system font that loads at 24 px.
    // NOTE(review): Windows-only paths — on other platforms every test
    // below panics at expect(). Consider gating or bundling a fixture.
    fn load_test_font() -> Option<TtfFont> {
        let paths = ["C:/Windows/Fonts/arial.ttf", "C:/Windows/Fonts/consola.ttf"];
        for p in &paths {
            if let Ok(data) = std::fs::read(p) {
                if let Ok(font) = TtfFont::new(&data, 24.0) {
                    return Some(font);
                }
            }
        }
        None
    }

    #[test]
    fn test_new() {
        // Basic metric sanity after construction.
        let font = load_test_font().expect("no test font");
        assert!(font.font_size == 24.0);
        assert!(font.ascender > 0.0);
        assert!(font.line_height > 0.0);
    }

    #[test]
    fn test_glyph_caches() {
        // First lookup rasterizes (marks the atlas dirty); the second
        // must be a pure cache hit that leaves the atlas untouched.
        let mut font = load_test_font().expect("no test font");
        let _g1 = font.glyph('A');
        assert!(font.is_dirty());
        font.clear_dirty();
        let _g2 = font.glyph('A'); // cache hit
        assert!(!font.is_dirty());
    }

    #[test]
    fn test_text_width() {
        // Width accumulates per-glyph advances; a superstring is wider.
        let mut font = load_test_font().expect("no test font");
        let w1 = font.text_width("Hello");
        let w2 = font.text_width("Hello World");
        assert!(w1 > 0.0);
        assert!(w2 > w1);
    }

    #[test]
    fn test_space_glyph() {
        let mut font = load_test_font().expect("no test font");
        let info = font.glyph(' ');
        assert!(info.advance > 0.0); // space has advance but no bitmap
        assert!(info.width == 0.0);
    }
}
|
||||
518
crates/voltex_editor/src/ttf_parser.rs
Normal file
518
crates/voltex_editor/src/ttf_parser.rs
Normal file
@@ -0,0 +1,518 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
// --- Byte helpers (big-endian) ---
//
// All helpers panic when `off` runs past the end of `data`, exactly
// like direct slice indexing.

/// Read one byte at `off`.
fn read_u8(data: &[u8], off: usize) -> u8 {
    data[off]
}

/// Read a big-endian u16 at `off`.
fn read_u16(data: &[u8], off: usize) -> u16 {
    let raw: [u8; 2] = data[off..off + 2].try_into().unwrap();
    u16::from_be_bytes(raw)
}

/// Read a big-endian i16 at `off` (same bits as the u16 read).
fn read_i16(data: &[u8], off: usize) -> i16 {
    read_u16(data, off) as i16
}

/// Read a big-endian u32 at `off`.
fn read_u32(data: &[u8], off: usize) -> u32 {
    let raw: [u8; 4] = data[off..off + 4].try_into().unwrap();
    u32::from_be_bytes(raw)
}
|
||||
|
||||
// --- Data types ---

/// A single contour point in font units.
#[derive(Debug, Clone)]
pub struct OutlinePoint {
    pub x: f32,
    pub y: f32,
    // true = anchor on the outline; false = quadratic Bezier control point
    pub on_curve: bool,
}

/// A decoded glyph: closed contours plus the bounding box taken from
/// the glyf header (all in font units).
#[derive(Debug, Clone)]
pub struct GlyphOutline {
    pub contours: Vec<Vec<OutlinePoint>>,
    pub x_min: i16,
    pub y_min: i16,
    pub x_max: i16,
    pub y_max: i16,
}

/// Horizontal metrics from the hmtx table (font units).
#[derive(Debug, Clone, Copy)]
pub struct GlyphMetrics {
    pub advance_width: u16,
    pub left_side_bearing: i16,
}

// --- Flag bits for simple glyph parsing ---
const ON_CURVE: u8 = 0x01;      // point lies on the outline
const X_SHORT: u8 = 0x02;       // x delta is one unsigned byte
const Y_SHORT: u8 = 0x04;       // y delta is one unsigned byte
const REPEAT_FLAG: u8 = 0x08;   // next byte = repeat count for this flag
const X_SAME_OR_POS: u8 = 0x10; // x unchanged, or short x delta is positive
const Y_SAME_OR_POS: u8 = 0x20; // y unchanged, or short y delta is positive
|
||||
|
||||
// --- TtfParser ---
|
||||
|
||||
/// Minimal TrueType parser: table directory plus the global metrics
/// needed for glyph lookup and rasterization.
pub struct TtfParser {
    data: Vec<u8>,                            // entire font file
    pub tables: HashMap<[u8; 4], (u32, u32)>, // tag -> (offset, length)
    pub units_per_em: u16,                    // head: design-grid resolution
    pub num_glyphs: u16,                      // maxp
    pub ascender: i16,                        // hhea, font units
    pub descender: i16,                       // hhea, font units
    pub line_gap: i16,                        // hhea, font units
    pub num_h_metrics: u16,                   // hhea: full records in hmtx
    pub loca_format: i16,                     // head: 0 = short (u16*2), 1 = long (u32)
}
|
||||
|
||||
impl TtfParser {
|
||||
/// Parse a TTF file from raw bytes.
|
||||
pub fn parse(data: Vec<u8>) -> Result<Self, String> {
|
||||
if data.len() < 12 {
|
||||
return Err("File too short for offset table".into());
|
||||
}
|
||||
|
||||
let _sf_version = read_u32(&data, 0);
|
||||
let num_tables = read_u16(&data, 4) as usize;
|
||||
|
||||
if data.len() < 12 + num_tables * 16 {
|
||||
return Err("File too short for table records".into());
|
||||
}
|
||||
|
||||
let mut tables = HashMap::new();
|
||||
for i in 0..num_tables {
|
||||
let rec_off = 12 + i * 16;
|
||||
let mut tag = [0u8; 4];
|
||||
tag.copy_from_slice(&data[rec_off..rec_off + 4]);
|
||||
let offset = read_u32(&data, rec_off + 8);
|
||||
let length = read_u32(&data, rec_off + 12);
|
||||
tables.insert(tag, (offset, length));
|
||||
}
|
||||
|
||||
// Parse head table
|
||||
let &(head_off, _) = tables
|
||||
.get(b"head")
|
||||
.ok_or("Missing head table")?;
|
||||
let head_off = head_off as usize;
|
||||
let units_per_em = read_u16(&data, head_off + 18);
|
||||
let loca_format = read_i16(&data, head_off + 50);
|
||||
|
||||
// Parse hhea table
|
||||
let &(hhea_off, _) = tables
|
||||
.get(b"hhea")
|
||||
.ok_or("Missing hhea table")?;
|
||||
let hhea_off = hhea_off as usize;
|
||||
let ascender = read_i16(&data, hhea_off + 4);
|
||||
let descender = read_i16(&data, hhea_off + 6);
|
||||
let line_gap = read_i16(&data, hhea_off + 8);
|
||||
let num_h_metrics = read_u16(&data, hhea_off + 34);
|
||||
|
||||
// Parse maxp table
|
||||
let &(maxp_off, _) = tables
|
||||
.get(b"maxp")
|
||||
.ok_or("Missing maxp table")?;
|
||||
let maxp_off = maxp_off as usize;
|
||||
let num_glyphs = read_u16(&data, maxp_off + 4);
|
||||
|
||||
Ok(Self {
|
||||
data,
|
||||
tables,
|
||||
units_per_em,
|
||||
num_glyphs,
|
||||
ascender,
|
||||
descender,
|
||||
line_gap,
|
||||
num_h_metrics,
|
||||
loca_format,
|
||||
})
|
||||
}
|
||||
|
||||
/// Look up the glyph index for a Unicode codepoint via cmap Format 4.
///
/// Returns 0 (the .notdef glyph) when the cmap table is missing, no
/// Format 4 subtable exists, or the codepoint is unmapped. Format 4
/// stores u16 ranges, so only BMP codepoints can ever match.
pub fn glyph_index(&self, codepoint: u32) -> u16 {
    let &(cmap_off, _) = match self.tables.get(b"cmap") {
        Some(v) => v,
        None => return 0,
    };
    let cmap_off = cmap_off as usize;
    let num_subtables = read_u16(&self.data, cmap_off + 2) as usize;

    // Find a Format 4 subtable (prefer platform 3 encoding 1, or platform 0)
    let mut fmt4_offset: Option<usize> = None;
    for i in 0..num_subtables {
        let rec = cmap_off + 4 + i * 8;
        let platform_id = read_u16(&self.data, rec);
        let encoding_id = read_u16(&self.data, rec + 2);
        let sub_offset = read_u32(&self.data, rec + 4) as usize;
        let abs_off = cmap_off + sub_offset;

        // Skip subtables whose format field would fall past the file end.
        if abs_off + 2 > self.data.len() {
            continue;
        }
        let format = read_u16(&self.data, abs_off);
        if format == 4 {
            // Prefer Windows Unicode BMP (3,1)
            if platform_id == 3 && encoding_id == 1 {
                fmt4_offset = Some(abs_off);
                break;
            }
            // Accept platform 0 as fallback
            if platform_id == 0 && fmt4_offset.is_none() {
                fmt4_offset = Some(abs_off);
            }
        }
    }

    let sub_off = match fmt4_offset {
        Some(o) => o,
        None => return 0,
    };

    // Parse Format 4: four parallel u16 arrays of segCount entries
    // (endCode, reservedPad, startCode, idDelta, idRangeOffset) follow
    // the 14-byte header.
    let seg_count_x2 = read_u16(&self.data, sub_off + 6) as usize;
    let seg_count = seg_count_x2 / 2;

    let end_code_base = sub_off + 14;
    let start_code_base = end_code_base + seg_count * 2 + 2; // +2 for reservedPad
    let id_delta_base = start_code_base + seg_count * 2;
    let id_range_offset_base = id_delta_base + seg_count * 2;

    for i in 0..seg_count {
        let end_code = read_u16(&self.data, end_code_base + i * 2) as u32;
        let start_code = read_u16(&self.data, start_code_base + i * 2) as u32;

        if end_code >= codepoint && start_code <= codepoint {
            let id_delta = read_i16(&self.data, id_delta_base + i * 2);
            let id_range_offset = read_u16(&self.data, id_range_offset_base + i * 2) as usize;

            if id_range_offset == 0 {
                // Direct mapping: glyph = codepoint + delta, mod 65536
                // (the i32 -> u16 cast performs the wrap).
                return (codepoint as i32 + id_delta as i32) as u16;
            } else {
                // Indirect: idRangeOffset is relative to its own array
                // slot and points into the trailing glyphIdArray.
                let offset_in_bytes =
                    id_range_offset + 2 * (codepoint - start_code) as usize;
                let glyph_addr = id_range_offset_base + i * 2 + offset_in_bytes;
                if glyph_addr + 1 < self.data.len() {
                    let glyph = read_u16(&self.data, glyph_addr);
                    if glyph != 0 {
                        return (glyph as i32 + id_delta as i32) as u16;
                    }
                }
                return 0;
            }
        }
    }

    0
}
|
||||
|
||||
/// Get the offset of a glyph in the glyf table using loca.
///
/// Returns (absolute offset into `data`, byte length), or None for an
/// out-of-range glyph id or an outline-less glyph (two identical
/// consecutive loca entries, e.g. space).
fn glyph_offset(&self, glyph_id: u16) -> Option<(usize, usize)> {
    let &(loca_off, _) = self.tables.get(b"loca")?;
    let &(glyf_off, _) = self.tables.get(b"glyf")?;
    let loca_off = loca_off as usize;
    let glyf_off = glyf_off as usize;

    if glyph_id >= self.num_glyphs {
        return None;
    }

    // loca stores num_glyphs + 1 entries; entry i and i+1 bracket
    // glyph i's data.
    let (offset, next_offset) = if self.loca_format == 0 {
        // Short format: u16 * 2
        let o = read_u16(&self.data, loca_off + glyph_id as usize * 2) as usize * 2;
        let n = read_u16(&self.data, loca_off + (glyph_id as usize + 1) * 2) as usize * 2;
        (o, n)
    } else {
        // Long format: u32
        let o = read_u32(&self.data, loca_off + glyph_id as usize * 4) as usize;
        let n = read_u32(&self.data, loca_off + (glyph_id as usize + 1) * 4) as usize;
        (o, n)
    };

    if offset == next_offset {
        // Empty glyph (e.g., space)
        return None;
    }

    Some((glyf_off + offset, next_offset - offset))
}
|
||||
|
||||
/// Parse the outline of a simple glyph.
///
/// Returns None for empty glyphs (no glyf data) and for compound
/// glyphs, which are not supported. Pairs of consecutive off-curve
/// points get an implicit on-curve midpoint inserted, so downstream
/// flattening sees well-formed quadratic segments.
pub fn glyph_outline(&self, glyph_id: u16) -> Option<GlyphOutline> {
    let (glyph_off, _glyph_len) = match self.glyph_offset(glyph_id) {
        Some(v) => v,
        None => return None, // empty glyph
    };

    let num_contours = read_i16(&self.data, glyph_off);
    if num_contours < 0 {
        // Compound glyph — not supported
        return None;
    }

    let num_contours = num_contours as usize;
    if num_contours == 0 {
        // Header only: keep the bounding box, no contours.
        return Some(GlyphOutline {
            contours: Vec::new(),
            x_min: read_i16(&self.data, glyph_off + 2),
            y_min: read_i16(&self.data, glyph_off + 4),
            x_max: read_i16(&self.data, glyph_off + 6),
            y_max: read_i16(&self.data, glyph_off + 8),
        });
    }

    let x_min = read_i16(&self.data, glyph_off + 2);
    let y_min = read_i16(&self.data, glyph_off + 4);
    let x_max = read_i16(&self.data, glyph_off + 6);
    let y_max = read_i16(&self.data, glyph_off + 8);

    // endPtsOfContours
    let mut end_pts = Vec::with_capacity(num_contours);
    let mut off = glyph_off + 10;
    for _ in 0..num_contours {
        end_pts.push(read_u16(&self.data, off) as usize);
        off += 2;
    }

    // Last contour's end index determines the total point count.
    let num_points = end_pts[num_contours - 1] + 1;

    // Skip instructions
    let instruction_length = read_u16(&self.data, off) as usize;
    off += 2 + instruction_length;

    // Parse flags (run-length compressed via REPEAT_FLAG)
    let mut flags = Vec::with_capacity(num_points);
    while flags.len() < num_points {
        let flag = read_u8(&self.data, off);
        off += 1;
        flags.push(flag);
        if flag & REPEAT_FLAG != 0 {
            let repeat_count = read_u8(&self.data, off) as usize;
            off += 1;
            for _ in 0..repeat_count {
                flags.push(flag);
            }
        }
    }

    // Parse x-coordinates (delta-encoded)
    let mut x_coords = Vec::with_capacity(num_points);
    let mut x: i32 = 0;
    for i in 0..num_points {
        let flag = flags[i];
        if flag & X_SHORT != 0 {
            // One unsigned byte; X_SAME_OR_POS supplies the sign.
            let dx = read_u8(&self.data, off) as i32;
            off += 1;
            x += if flag & X_SAME_OR_POS != 0 { dx } else { -dx };
        } else if flag & X_SAME_OR_POS != 0 {
            // delta = 0
        } else {
            let dx = read_i16(&self.data, off) as i32;
            off += 2;
            x += dx;
        }
        x_coords.push(x);
    }

    // Parse y-coordinates (delta-encoded)
    let mut y_coords = Vec::with_capacity(num_points);
    let mut y: i32 = 0;
    for i in 0..num_points {
        let flag = flags[i];
        if flag & Y_SHORT != 0 {
            let dy = read_u8(&self.data, off) as i32;
            off += 1;
            y += if flag & Y_SAME_OR_POS != 0 { dy } else { -dy };
        } else if flag & Y_SAME_OR_POS != 0 {
            // delta = 0
        } else {
            let dy = read_i16(&self.data, off) as i32;
            off += 2;
            y += dy;
        }
        y_coords.push(y);
    }

    // Build contours with implicit on-curve point insertion
    let mut contours = Vec::with_capacity(num_contours);
    let mut start = 0;
    for &end in &end_pts {
        let raw_points: Vec<(f32, f32, bool)> = (start..=end)
            .map(|i| {
                (
                    x_coords[i] as f32,
                    y_coords[i] as f32,
                    flags[i] & ON_CURVE != 0,
                )
            })
            .collect();

        let mut contour = Vec::new();
        let n = raw_points.len();
        if n == 0 {
            start = end + 1;
            contours.push(contour);
            continue;
        }

        for j in 0..n {
            let (cx, cy, c_on) = raw_points[j];
            // Wrap around: the contour is closed, so the last point's
            // successor is the first point.
            let (nx, ny, n_on) = raw_points[(j + 1) % n];

            contour.push(OutlinePoint {
                x: cx,
                y: cy,
                on_curve: c_on,
            });

            // If both current and next are off-curve, insert implicit midpoint
            if !c_on && !n_on {
                contour.push(OutlinePoint {
                    x: (cx + nx) * 0.5,
                    y: (cy + ny) * 0.5,
                    on_curve: true,
                });
            }
        }

        start = end + 1;
        contours.push(contour);
    }

    Some(GlyphOutline {
        contours,
        x_min,
        y_min,
        x_max,
        y_max,
    })
}
|
||||
|
||||
/// Get the horizontal metrics for a glyph.
|
||||
pub fn glyph_metrics(&self, glyph_id: u16) -> GlyphMetrics {
|
||||
let &(hmtx_off, _) = match self.tables.get(b"hmtx") {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return GlyphMetrics {
|
||||
advance_width: 0,
|
||||
left_side_bearing: 0,
|
||||
}
|
||||
}
|
||||
};
|
||||
let hmtx_off = hmtx_off as usize;
|
||||
|
||||
if (glyph_id as u16) < self.num_h_metrics {
|
||||
let rec = hmtx_off + glyph_id as usize * 4;
|
||||
GlyphMetrics {
|
||||
advance_width: read_u16(&self.data, rec),
|
||||
left_side_bearing: read_i16(&self.data, rec + 2),
|
||||
}
|
||||
} else {
|
||||
// Use last advance_width, lsb from separate array
|
||||
let last_aw_off = hmtx_off + (self.num_h_metrics as usize - 1) * 4;
|
||||
let advance_width = read_u16(&self.data, last_aw_off);
|
||||
let lsb_array_off = hmtx_off + self.num_h_metrics as usize * 4;
|
||||
let idx = glyph_id as usize - self.num_h_metrics as usize;
|
||||
let left_side_bearing = read_i16(&self.data, lsb_array_off + idx * 2);
|
||||
GlyphMetrics {
|
||||
advance_width,
|
||||
left_side_bearing,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // First system font that parses successfully.
    // NOTE(review): Windows-only paths — tests panic at expect() on
    // other platforms.
    fn load_test_font() -> Option<TtfParser> {
        let paths = [
            "C:/Windows/Fonts/arial.ttf",
            "C:/Windows/Fonts/consola.ttf",
        ];
        for path in &paths {
            if let Ok(data) = std::fs::read(path) {
                if let Ok(parser) = TtfParser::parse(data) {
                    return Some(parser);
                }
            }
        }
        None
    }

    #[test]
    fn test_parse_loads_tables() {
        let parser = load_test_font().expect("no test font found");
        assert!(parser.tables.contains_key(b"head"));
        assert!(parser.tables.contains_key(b"cmap"));
        assert!(parser.tables.contains_key(b"glyf"));
    }

    #[test]
    fn test_head_values() {
        let parser = load_test_font().expect("no test font found");
        assert!(parser.units_per_em > 0);
        assert!(parser.loca_format == 0 || parser.loca_format == 1);
    }

    #[test]
    fn test_hhea_values() {
        let parser = load_test_font().expect("no test font found");
        assert!(parser.ascender > 0);
        assert!(parser.num_h_metrics > 0);
    }

    #[test]
    fn test_maxp_values() {
        let parser = load_test_font().expect("no test font found");
        assert!(parser.num_glyphs > 0);
    }

    #[test]
    fn test_cmap_ascii() {
        let parser = load_test_font().expect("no test font found");
        let glyph_a = parser.glyph_index(0x41); // 'A'
        assert!(glyph_a > 0, "glyph index for 'A' should be > 0");
    }

    #[test]
    fn test_cmap_space() {
        let parser = load_test_font().expect("no test font found");
        let glyph = parser.glyph_index(0x20); // space
        assert!(glyph > 0);
    }

    #[test]
    fn test_cmap_unmapped() {
        // Outside the BMP: a Format 4 cmap cannot map it, so the
        // .notdef glyph (0) is expected.
        let parser = load_test_font().expect("no test font found");
        let glyph = parser.glyph_index(0xFFFD0); // unlikely codepoint
        assert_eq!(glyph, 0);
    }

    #[test]
    fn test_glyph_outline_has_contours() {
        let parser = load_test_font().expect("no test font found");
        let gid = parser.glyph_index(0x41); // 'A'
        let outline = parser.glyph_outline(gid);
        assert!(outline.is_some());
        let outline = outline.unwrap();
        assert!(!outline.contours.is_empty(), "A should have contours");
    }

    #[test]
    fn test_glyph_metrics() {
        let parser = load_test_font().expect("no test font found");
        let gid = parser.glyph_index(0x41);
        let metrics = parser.glyph_metrics(gid);
        assert!(metrics.advance_width > 0);
    }

    #[test]
    fn test_space_no_contours() {
        let parser = load_test_font().expect("no test font found");
        let gid = parser.glyph_index(0x20);
        let outline = parser.glyph_outline(gid);
        // Space may have no outline or empty contours
        if let Some(o) = outline {
            assert!(o.contours.is_empty());
        }
    }
}
|
||||
@@ -41,6 +41,8 @@ pub struct UiContext {
|
||||
pub dragging: Option<(u32, u64)>,
|
||||
pub drag_start: (f32, f32),
|
||||
pub(crate) drag_started: bool,
|
||||
/// TTF font for high-quality text rendering (None = bitmap fallback).
|
||||
pub ttf_font: Option<crate::ttf_font::TtfFont>,
|
||||
}
|
||||
|
||||
impl UiContext {
|
||||
@@ -70,6 +72,7 @@ impl UiContext {
|
||||
dragging: None,
|
||||
drag_start: (0.0, 0.0),
|
||||
drag_started: false,
|
||||
ttf_font: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -141,4 +144,41 @@ impl UiContext {
|
||||
&& self.mouse_y >= y
|
||||
&& self.mouse_y < y + h
|
||||
}
|
||||
|
||||
/// Draw text using TTF font if available, otherwise bitmap font fallback.
///
/// `y` is the TOP of the text line; the TTF path derives the baseline
/// as `y + ascender` and places each glyph at baseline - bearing_y.
pub fn draw_text(&mut self, text: &str, x: f32, y: f32, color: [u8; 4]) {
    if let Some(ref mut ttf) = self.ttf_font {
        let ascender = ttf.ascender;
        let mut cx = x;
        for ch in text.chars() {
            // Clone keeps the glyph data independent of the cache
            // borrow so draw_list can be mutated below.
            let info = ttf.glyph(ch).clone();
            // Zero-sized glyphs (space) still advance the pen but emit
            // no quad.
            if info.width > 0.0 && info.height > 0.0 {
                let gx = cx + info.bearing_x;
                let gy = y + ascender - info.bearing_y;
                let (u0, v0, u1, v1) = (info.uv[0], info.uv[1], info.uv[2], info.uv[3]);
                self.draw_list.add_rect_uv(gx, gy, info.width, info.height, u0, v0, u1, v1, color);
            }
            cx += info.advance;
        }
    } else {
        // Bitmap font fallback: fixed-size cells, one per char.
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let mut cx = x;
        for ch in text.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, y, gw, gh, u0, v0, u1, v1, color);
            cx += gw;
        }
    }
}
|
||||
|
||||
/// Calculate text width using TTF or bitmap font.
|
||||
pub fn ttf_text_width(&mut self, text: &str) -> f32 {
|
||||
if let Some(ref mut ttf) = self.ttf_font {
|
||||
ttf.text_width(text)
|
||||
} else {
|
||||
text.len() as f32 * self.font.glyph_width as f32
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
182
crates/voltex_editor/src/viewport_renderer.rs
Normal file
182
crates/voltex_editor/src/viewport_renderer.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
/// Uniform block for the blit shader: destination rect and surface
/// size in pixels, padded so the struct size is a multiple of 16 bytes
/// for GPU uniform layout.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct RectUniform {
    rect: [f32; 4],   // x, y, w, h in screen pixels
    screen: [f32; 2], // render-target size in pixels
    _pad: [f32; 2],   // padding to 32 bytes
}
|
||||
|
||||
/// Blits an offscreen viewport texture into a rectangle of the final
/// surface using a scissored fullscreen-quad pass.
pub struct ViewportRenderer {
    pipeline: wgpu::RenderPipeline,
    bind_group_layout: wgpu::BindGroupLayout, // uniform + texture + sampler
    sampler: wgpu::Sampler,                   // linear filtering
    uniform_buffer: wgpu::Buffer,             // holds one RectUniform
}
|
||||
|
||||
impl ViewportRenderer {
|
||||
/// Build the blit pipeline: a buffer-less quad positioned by a
/// RectUniform in the vertex stage, sampling the viewport texture in
/// the fragment stage.
pub fn new(device: &wgpu::Device, surface_format: wgpu::TextureFormat) -> Self {
    let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("Viewport Blit Shader"),
        source: wgpu::ShaderSource::Wgsl(include_str!("viewport_shader.wgsl").into()),
    });

    // Bindings: 0 = rect uniform (vertex stage), 1 = color texture,
    // 2 = sampler (both fragment stage). Must match viewport_shader.wgsl.
    let bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Viewport Blit BGL"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: Some("Viewport Blit PL"),
        bind_group_layouts: &[&bind_group_layout],
        immediate_size: 0,
    });

    let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Viewport Blit Pipeline"),
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: Some("vs_main"),
            // Quad is generated from the vertex index — no vertex buffers.
            buffers: &[],
            compilation_options: wgpu::PipelineCompilationOptions::default(),
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: Some("fs_main"),
            targets: &[Some(wgpu::ColorTargetState {
                format: surface_format,
                // Opaque copy: the blit fully overwrites the rect.
                blend: Some(wgpu::BlendState::REPLACE),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            compilation_options: wgpu::PipelineCompilationOptions::default(),
        }),
        primitive: wgpu::PrimitiveState {
            topology: wgpu::PrimitiveTopology::TriangleList,
            ..Default::default()
        },
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview_mask: None,
        cache: None,
    });

    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        label: Some("Viewport Sampler"),
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        ..Default::default()
    });

    let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Viewport Rect Uniform"),
        size: std::mem::size_of::<RectUniform>() as u64,
        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    ViewportRenderer {
        pipeline,
        bind_group_layout,
        sampler,
        uniform_buffer,
    }
}
|
||||
|
||||
pub fn render(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
target_view: &wgpu::TextureView,
|
||||
viewport_color_view: &wgpu::TextureView,
|
||||
screen_w: f32,
|
||||
screen_h: f32,
|
||||
rect_x: f32,
|
||||
rect_y: f32,
|
||||
rect_w: f32,
|
||||
rect_h: f32,
|
||||
) {
|
||||
let uniform = RectUniform {
|
||||
rect: [rect_x, rect_y, rect_w, rect_h],
|
||||
screen: [screen_w, screen_h],
|
||||
_pad: [0.0; 2],
|
||||
};
|
||||
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniform]));
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Viewport Blit BG"),
|
||||
layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: self.uniform_buffer.as_entire_binding(),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::TextureView(viewport_color_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Sampler(&self.sampler),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("Viewport Blit Pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: target_view,
|
||||
resolve_target: None,
|
||||
depth_slice: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Load,
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: None,
|
||||
occlusion_query_set: None,
|
||||
timestamp_writes: None,
|
||||
multiview_mask: None,
|
||||
});
|
||||
|
||||
rpass.set_scissor_rect(
|
||||
rect_x.max(0.0) as u32,
|
||||
rect_y.max(0.0) as u32,
|
||||
rect_w.ceil().max(1.0) as u32,
|
||||
rect_h.ceil().max(1.0) as u32,
|
||||
);
|
||||
rpass.set_pipeline(&self.pipeline);
|
||||
rpass.set_bind_group(0, &bind_group, &[]);
|
||||
rpass.draw(0..6, 0..1);
|
||||
}
|
||||
}
|
||||
42
crates/voltex_editor/src/viewport_shader.wgsl
Normal file
42
crates/voltex_editor/src/viewport_shader.wgsl
Normal file
@@ -0,0 +1,42 @@
|
||||
// Destination rect (x, y, w, h) and render-target size, in pixels.
// Layout must match the Rust-side RectUniform.
struct RectUniform {
    rect: vec4<f32>,
    screen: vec2<f32>,
    _pad: vec2<f32>,
};

@group(0) @binding(0) var<uniform> u: RectUniform;
@group(0) @binding(1) var t_viewport: texture_2d<f32>;
@group(0) @binding(2) var s_viewport: sampler;

struct VertexOutput {
    @builtin(position) position: vec4<f32>,
    @location(0) uv: vec2<f32>,
};

// Generates a two-triangle quad covering the destination rect purely
// from the vertex index — no vertex buffer is bound.
@vertex
fn vs_main(@builtin(vertex_index) idx: u32) -> VertexOutput {
    var positions = array<vec2<f32>, 6>(
        vec2<f32>(0.0, 0.0),
        vec2<f32>(1.0, 0.0),
        vec2<f32>(1.0, 1.0),
        vec2<f32>(0.0, 0.0),
        vec2<f32>(1.0, 1.0),
        vec2<f32>(0.0, 1.0),
    );

    // Corner position in pixels, then to NDC. The y axis flips: pixel
    // y grows downward, NDC y grows upward.
    let p = positions[idx];
    let px = u.rect.x + p.x * u.rect.z;
    let py = u.rect.y + p.y * u.rect.w;
    let ndc_x = (px / u.screen.x) * 2.0 - 1.0;
    let ndc_y = 1.0 - (py / u.screen.y) * 2.0;

    var out: VertexOutput;
    out.position = vec4<f32>(ndc_x, ndc_y, 0.0, 1.0);
    // The unit-square corner doubles as the texture coordinate.
    out.uv = p;
    return out;
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    return textureSample(t_viewport, s_viewport, in.uv);
}
|
||||
69
crates/voltex_editor/src/viewport_texture.rs
Normal file
69
crates/voltex_editor/src/viewport_texture.rs
Normal file
@@ -0,0 +1,69 @@
|
||||
// Offscreen render-target formats for the 3D viewport.
pub const VIEWPORT_COLOR_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;
pub const VIEWPORT_DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

/// Offscreen color + depth pair the 3D scene renders into before the
/// result is blitted into the editor UI.
pub struct ViewportTexture {
    pub color_texture: wgpu::Texture,
    pub color_view: wgpu::TextureView,
    pub depth_texture: wgpu::Texture,
    pub depth_view: wgpu::TextureView,
    pub width: u32,  // actual allocated width (clamped to >= 1)
    pub height: u32, // actual allocated height (clamped to >= 1)
}
|
||||
|
||||
impl ViewportTexture {
    /// Allocate the color and depth textures. Dimensions are clamped
    /// to >= 1 because zero-sized textures are invalid.
    pub fn new(device: &wgpu::Device, width: u32, height: u32) -> Self {
        let w = width.max(1);
        let h = height.max(1);

        let color_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Viewport Color"),
            size: wgpu::Extent3d {
                width: w,
                height: h,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: VIEWPORT_COLOR_FORMAT,
            // Rendered to by the scene pass, then sampled by the UI blit.
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });
        let color_view = color_texture.create_view(&wgpu::TextureViewDescriptor::default());

        let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Viewport Depth"),
            size: wgpu::Extent3d {
                width: w,
                height: h,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: VIEWPORT_DEPTH_FORMAT,
            // Depth is never sampled — attachment use only.
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            view_formats: &[],
        });
        let depth_view = depth_texture.create_view(&wgpu::TextureViewDescriptor::default());

        ViewportTexture {
            color_texture,
            color_view,
            depth_texture,
            depth_view,
            width: w,
            height: h,
        }
    }

    /// Recreate both textures when the requested size changed.
    /// Returns true if a reallocation happened — callers must rebind
    /// the (now replaced) views.
    pub fn ensure_size(&mut self, device: &wgpu::Device, width: u32, height: u32) -> bool {
        let w = width.max(1);
        let h = height.max(1);
        if w == self.width && h == self.height {
            return false;
        }
        *self = Self::new(device, w, h);
        true
    }
}
|
||||
140
crates/voltex_net/src/encryption.rs
Normal file
140
crates/voltex_net/src/encryption.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
/// Simple XOR stream obfuscation with a rotating key + sequence counter.
///
/// SECURITY NOTE: an XOR cipher keyed by a counter-derived byte pattern
/// provides obfuscation only — no real confidentiality against an
/// attacker who sees multiple packets, and no integrity (there is no
/// MAC). Do not rely on it to protect sensitive data.
pub struct PacketCipher {
    key: Vec<u8>,      // shared base key; guaranteed non-empty by new()
    send_counter: u64, // sequence number of the next outgoing packet
    recv_counter: u64, // lowest acceptable incoming sequence number
}

impl PacketCipher {
    /// Create a cipher from a shared key.
    ///
    /// # Panics
    /// Panics if `key` is empty (derive_key would divide by zero).
    pub fn new(key: &[u8]) -> Self {
        assert!(!key.is_empty(), "encryption key must not be empty");
        PacketCipher { key: key.to_vec(), send_counter: 0, recv_counter: 0 }
    }

    /// Encrypt `plaintext` into a NEW buffer (the input is not
    /// modified), prepending the 8-byte little-endian sequence number.
    pub fn encrypt(&mut self, plaintext: &[u8]) -> Vec<u8> {
        let mut output = Vec::with_capacity(8 + plaintext.len());
        // Prepend sequence counter
        output.extend_from_slice(&self.send_counter.to_le_bytes());
        // XOR plaintext with key derived from counter + base key
        let derived = self.derive_key(self.send_counter);
        for (i, &byte) in plaintext.iter().enumerate() {
            output.push(byte ^ derived[i % derived.len()]);
        }
        self.send_counter += 1;
        output
    }

    /// Decrypt a packet produced by `encrypt`.
    ///
    /// # Errors
    /// Rejects packets shorter than the 8-byte header and packets whose
    /// sequence number was already consumed (simple anti-replay).
    pub fn decrypt(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, String> {
        if ciphertext.len() < 8 {
            return Err("packet too short".to_string());
        }
        let seq = u64::from_le_bytes(ciphertext[0..8].try_into().unwrap());

        // Anti-replay: sequence must be >= expected
        if seq < self.recv_counter {
            return Err(format!("replay detected: got seq {}, expected >= {}", seq, self.recv_counter));
        }
        // saturating_add: a forged seq of u64::MAX must not overflow
        // (which would panic in debug or wrap recv_counter back to 0,
        // re-opening the replay window, in release).
        self.recv_counter = seq.saturating_add(1);

        let derived = self.derive_key(seq);
        let mut plaintext = Vec::with_capacity(ciphertext.len() - 8);
        for (i, &byte) in ciphertext[8..].iter().enumerate() {
            plaintext.push(byte ^ derived[i % derived.len()]);
        }
        Ok(plaintext)
    }

    /// Derive a per-packet key: each base-key byte is rotated by the
    /// corresponding byte of the little-endian counter.
    fn derive_key(&self, counter: u64) -> Vec<u8> {
        let counter_bytes = counter.to_le_bytes();
        self.key.iter().enumerate().map(|(i, &k)| {
            k.wrapping_add(counter_bytes[i % 8])
        }).collect()
    }
}
|
||||
|
||||
/// Simple token-based authentication.
///
/// SECURITY NOTE(review): the token is the payload XORed with the secret,
/// which is not a real MAC — an observer who knows the payload of one valid
/// token can recover the pad. Suitable for development/testing only; replace
/// with an HMAC before production use.
pub struct AuthToken {
    /// Player this token was issued for.
    pub player_id: u32,
    /// Opaque token bytes: (player_id LE ++ expires_at LE) XOR secret.
    pub token: Vec<u8>,
    pub expires_at: f64, // timestamp
}

impl AuthToken {
    /// Generate a simple auth token from player_id + secret.
    ///
    /// # Panics
    /// Panics if `secret` is empty — it is used as a repeating XOR pad, and
    /// previously this surfaced as a bare remainder-by-zero panic at
    /// `i % secret.len()`. The explicit assert mirrors `PacketCipher::new`.
    pub fn generate(player_id: u32, secret: &[u8], expires_at: f64) -> Self {
        assert!(!secret.is_empty(), "auth secret must not be empty");
        let mut token = Vec::new();
        token.extend_from_slice(&player_id.to_le_bytes());
        token.extend_from_slice(&expires_at.to_le_bytes());
        // Simple HMAC-like: XOR with secret
        for (i, byte) in token.iter_mut().enumerate() {
            *byte ^= secret[i % secret.len()];
        }
        AuthToken { player_id, token, expires_at }
    }

    /// Validate token against secret: not expired, and token bytes match a
    /// freshly generated token for the same payload.
    ///
    /// NOTE(review): `==` on Vec<u8> is not constant-time; acceptable for the
    /// XOR scheme above, but revisit alongside a real MAC.
    pub fn validate(&self, secret: &[u8], current_time: f64) -> bool {
        if current_time > self.expires_at {
            return false;
        }
        let expected = AuthToken::generate(self.player_id, secret, self.expires_at);
        self.token == expected.token
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Two independent cipher instances sharing a key must round-trip cleanly.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let key = b"secret_key_1234";
        let mut encryptor = PacketCipher::new(key);
        let mut decryptor = PacketCipher::new(key);
        let msg = b"hello world";
        let encrypted = encryptor.encrypt(msg);
        let decrypted = decryptor.decrypt(&encrypted).unwrap();
        assert_eq!(&decrypted, msg);
    }

    // The XOR pad must actually change the payload bytes.
    #[test]
    fn test_encrypted_differs_from_plain() {
        let mut cipher = PacketCipher::new(b"key");
        let msg = b"test message";
        let encrypted = cipher.encrypt(msg);
        assert_ne!(&encrypted[8..], msg); // ciphertext differs
    }

    // Once a newer sequence number is accepted, older packets are rejected.
    #[test]
    fn test_replay_rejected() {
        let key = b"key";
        let mut enc = PacketCipher::new(key);
        let mut dec = PacketCipher::new(key);
        let pkt1 = enc.encrypt(b"first");
        let pkt2 = enc.encrypt(b"second");
        let _ = dec.decrypt(&pkt2).unwrap(); // accept pkt2 (seq=1)
        let result = dec.decrypt(&pkt1); // pkt1 has seq=0 < expected 2
        assert!(result.is_err());
    }

    #[test]
    fn test_auth_token_valid() {
        let secret = b"server_secret";
        let token = AuthToken::generate(42, secret, 1000.0);
        assert!(token.validate(secret, 999.0));
    }

    // current_time past expires_at must invalidate the token.
    #[test]
    fn test_auth_token_expired() {
        let secret = b"server_secret";
        let token = AuthToken::generate(42, secret, 1000.0);
        assert!(!token.validate(secret, 1001.0));
    }

    // Equal-length but different secret bytes must fail validation.
    #[test]
    fn test_auth_token_wrong_secret() {
        let token = AuthToken::generate(42, b"correct", 1000.0);
        assert!(!token.validate(b"wronggg", 999.0));
    }
}
|
||||
140
crates/voltex_net/src/lag_compensation.rs
Normal file
140
crates/voltex_net/src/lag_compensation.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
use std::collections::VecDeque;
|
||||
|
||||
/// A timestamped snapshot of entity positions for lag compensation.
#[derive(Debug, Clone)]
pub struct HistoryEntry {
    pub tick: u64,
    pub timestamp: f32, // seconds
    pub positions: Vec<([f32; 3], u32)>, // (position, entity_id)
}

/// Stores recent world state history for server-side lag compensation.
///
/// Snapshots are expected to be recorded in increasing timestamp order
/// (one per tick); `rewind_interpolated` relies on that ordering.
pub struct LagCompensation {
    history: VecDeque<HistoryEntry>,
    pub max_history_ms: f32, // max history duration (e.g., 200ms)
}

impl LagCompensation {
    pub fn new(max_history_ms: f32) -> Self {
        LagCompensation { history: VecDeque::new(), max_history_ms }
    }

    /// Record current world state and prune snapshots older than the window.
    pub fn record(&mut self, entry: HistoryEntry) {
        self.history.push_back(entry);
        // Prune relative to the newest entry's timestamp, so the retention
        // window tracks game time rather than wall-clock time.
        let cutoff = self
            .history
            .back()
            .map(|e| e.timestamp - self.max_history_ms / 1000.0)
            .unwrap_or(0.0);
        while let Some(front) = self.history.front() {
            if front.timestamp < cutoff {
                self.history.pop_front();
            } else {
                break;
            }
        }
    }

    /// Find the closest history entry to the given timestamp.
    pub fn rewind(&self, timestamp: f32) -> Option<&HistoryEntry> {
        let mut best: Option<&HistoryEntry> = None;
        let mut best_diff = f32::MAX;
        for entry in &self.history {
            let diff = (entry.timestamp - timestamp).abs();
            if diff < best_diff {
                best_diff = diff;
                best = Some(entry);
            }
        }
        best
    }

    /// Interpolate between the two entries bracketing the given timestamp.
    ///
    /// Falls back to the closest snapshot when fewer than two entries exist
    /// or the timestamp lies outside the recorded range.
    pub fn rewind_interpolated(&self, timestamp: f32) -> Option<Vec<([f32; 3], u32)>> {
        if self.history.len() < 2 {
            return self.rewind(timestamp).map(|e| e.positions.clone());
        }

        // Find the two entries bracketing the timestamp.
        let mut before: Option<&HistoryEntry> = None;
        let mut after: Option<&HistoryEntry> = None;
        for entry in &self.history {
            if entry.timestamp <= timestamp {
                before = Some(entry);
            } else {
                after = Some(entry);
                break;
            }
        }

        match (before, after) {
            (Some(b), Some(a)) => {
                let t = if (a.timestamp - b.timestamp).abs() > 1e-6 {
                    (timestamp - b.timestamp) / (a.timestamp - b.timestamp)
                } else {
                    0.0
                };
                // FIX: pair entities by id rather than by list index. The old
                // index-based pairing interpolated between two DIFFERENT
                // entities whenever a spawn/despawn shifted the ordering
                // between snapshots. Entities with no match in the later
                // snapshot are skipped, mirroring the old drop-when-missing
                // behavior for unmatched tail entries.
                let result: Vec<([f32; 3], u32)> = b
                    .positions
                    .iter()
                    .filter_map(|&(pos_b, id)| {
                        let &(pos_a, _) =
                            a.positions.iter().find(|&&(_, other)| other == id)?;
                        Some((
                            [
                                pos_b[0] + (pos_a[0] - pos_b[0]) * t,
                                pos_b[1] + (pos_a[1] - pos_b[1]) * t,
                                pos_b[2] + (pos_a[2] - pos_b[2]) * t,
                            ],
                            id,
                        ))
                    })
                    .collect();
                Some(result)
            }
            _ => self.rewind(timestamp).map(|e| e.positions.clone()),
        }
    }

    /// Number of snapshots currently retained.
    pub fn history_len(&self) -> usize { self.history.len() }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_record_and_rewind() {
        let mut lc = LagCompensation::new(200.0);
        lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![([1.0, 0.0, 0.0], 0)] });
        lc.record(HistoryEntry { tick: 2, timestamp: 0.016, positions: vec![([2.0, 0.0, 0.0], 0)] });
        let entry = lc.rewind(0.0).unwrap();
        assert_eq!(entry.tick, 1);
    }

    // Recording past the retention window must drop the oldest snapshots.
    #[test]
    fn test_prune_old() {
        let mut lc = LagCompensation::new(100.0); // 100ms
        for i in 0..20 {
            lc.record(HistoryEntry { tick: i, timestamp: i as f32 * 0.016, positions: vec![] });
        }
        // At t=0.304, cutoff = 0.304 - 0.1 = 0.204
        // Entries before t=0.204 should be pruned
        assert!(lc.history_len() < 20);
    }

    // rewind() picks the entry nearest in time, not the last one before.
    #[test]
    fn test_rewind_closest() {
        let mut lc = LagCompensation::new(500.0);
        lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![] });
        lc.record(HistoryEntry { tick: 2, timestamp: 0.1, positions: vec![] });
        lc.record(HistoryEntry { tick: 3, timestamp: 0.2, positions: vec![] });
        let entry = lc.rewind(0.09).unwrap();
        assert_eq!(entry.tick, 2); // closest to 0.1
    }

    // A query halfway between two snapshots yields the midpoint position.
    #[test]
    fn test_rewind_interpolated() {
        let mut lc = LagCompensation::new(500.0);
        lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![([0.0, 0.0, 0.0], 0)] });
        lc.record(HistoryEntry { tick: 2, timestamp: 0.1, positions: vec![([10.0, 0.0, 0.0], 0)] });
        let interp = lc.rewind_interpolated(0.05).unwrap();
        assert!((interp[0].0[0] - 5.0).abs() < 0.1); // midpoint
    }

    #[test]
    fn test_empty_history() {
        let lc = LagCompensation::new(200.0);
        assert!(lc.rewind(0.0).is_none());
    }
}
|
||||
@@ -5,6 +5,8 @@ pub mod client;
|
||||
pub mod reliable;
|
||||
pub mod snapshot;
|
||||
pub mod interpolation;
|
||||
pub mod lag_compensation;
|
||||
pub mod encryption;
|
||||
|
||||
pub use packet::Packet;
|
||||
pub use socket::NetSocket;
|
||||
@@ -13,3 +15,5 @@ pub use client::{NetClient, ClientEvent};
|
||||
pub use reliable::{ReliableChannel, OrderedChannel};
|
||||
pub use snapshot::{Snapshot, EntityState, serialize_snapshot, deserialize_snapshot, diff_snapshots, apply_diff};
|
||||
pub use interpolation::InterpolationBuffer;
|
||||
pub use lag_compensation::LagCompensation;
|
||||
pub use encryption::{PacketCipher, AuthToken};
|
||||
|
||||
@@ -1,10 +1,37 @@
|
||||
use voltex_math::{Vec3, AABB};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
/// A convex hull collider defined by a set of vertices.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ConvexHull {
|
||||
pub vertices: Vec<Vec3>,
|
||||
}
|
||||
|
||||
impl ConvexHull {
|
||||
pub fn new(vertices: Vec<Vec3>) -> Self {
|
||||
ConvexHull { vertices }
|
||||
}
|
||||
|
||||
/// Support function for GJK: returns the vertex farthest in the given direction.
|
||||
pub fn support(&self, direction: Vec3) -> Vec3 {
|
||||
let mut best = self.vertices[0];
|
||||
let mut best_dot = best.dot(direction);
|
||||
for &v in &self.vertices[1..] {
|
||||
let d = v.dot(direction);
|
||||
if d > best_dot {
|
||||
best_dot = d;
|
||||
best = v;
|
||||
}
|
||||
}
|
||||
best
|
||||
}
|
||||
}
|
||||
|
||||
/// Collision shapes supported by the physics system.
#[derive(Debug, Clone)]
pub enum Collider {
    /// Sphere centered on the entity's position.
    Sphere { radius: f32 },
    /// Axis-aligned box described by its half extents.
    Box { half_extents: Vec3 },
    /// Vertical capsule: cylinder of `half_height` capped by hemispheres of
    /// `radius` (its AABB extends half_height + radius vertically, per `aabb`).
    Capsule { radius: f32, half_height: f32 },
    /// Arbitrary convex point set; see `ConvexHull`.
    ConvexHull(ConvexHull),
}
|
||||
|
||||
impl Collider {
|
||||
@@ -21,6 +48,16 @@ impl Collider {
|
||||
let r = Vec3::new(*radius, *half_height + *radius, *radius);
|
||||
AABB::new(position - r, position + r)
|
||||
}
|
||||
Collider::ConvexHull(hull) => {
|
||||
let mut min = position + hull.vertices[0];
|
||||
let mut max = min;
|
||||
for &v in &hull.vertices[1..] {
|
||||
let p = position + v;
|
||||
min = Vec3::new(min.x.min(p.x), min.y.min(p.y), min.z.min(p.z));
|
||||
max = Vec3::new(max.x.max(p.x), max.y.max(p.y), max.z.max(p.z));
|
||||
}
|
||||
AABB::new(min, max)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -52,4 +89,45 @@ mod tests {
|
||||
assert_eq!(aabb.min, Vec3::new(0.5, 0.5, 2.5));
|
||||
assert_eq!(aabb.max, Vec3::new(1.5, 3.5, 3.5));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convex_hull_support() {
|
||||
let hull = ConvexHull::new(vec![
|
||||
Vec3::new(-1.0, -1.0, -1.0),
|
||||
Vec3::new(1.0, -1.0, -1.0),
|
||||
Vec3::new(0.0, 1.0, 0.0),
|
||||
Vec3::new(0.0, -1.0, 1.0),
|
||||
]);
|
||||
// Support in +Y direction should return the top vertex
|
||||
let s = hull.support(Vec3::new(0.0, 1.0, 0.0));
|
||||
assert!((s.y - 1.0).abs() < 1e-6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convex_hull_support_negative() {
|
||||
let hull = ConvexHull::new(vec![
|
||||
Vec3::new(0.0, 0.0, 0.0),
|
||||
Vec3::new(1.0, 0.0, 0.0),
|
||||
Vec3::new(0.0, 1.0, 0.0),
|
||||
]);
|
||||
let s = hull.support(Vec3::new(-1.0, 0.0, 0.0));
|
||||
assert!((s.x - 0.0).abs() < 1e-6); // origin is farthest in -X
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convex_hull_in_collider() {
|
||||
// Test that ConvexHull variant works with existing collision system
|
||||
let hull = ConvexHull::new(vec![
|
||||
Vec3::new(-1.0, -1.0, -1.0),
|
||||
Vec3::new(1.0, -1.0, -1.0),
|
||||
Vec3::new(1.0, 1.0, -1.0),
|
||||
Vec3::new(-1.0, 1.0, -1.0),
|
||||
Vec3::new(-1.0, -1.0, 1.0),
|
||||
Vec3::new(1.0, -1.0, 1.0),
|
||||
Vec3::new(1.0, 1.0, 1.0),
|
||||
Vec3::new(-1.0, 1.0, 1.0),
|
||||
]);
|
||||
// Just verify construction works
|
||||
assert_eq!(hull.vertices.len(), 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ pub fn detect_collisions(world: &World) -> Vec<ContactPoint> {
|
||||
let pairs_data: Vec<(Entity, Vec3, Collider)> = world
|
||||
.query2::<Transform, Collider>()
|
||||
.into_iter()
|
||||
.map(|(e, t, c)| (e, t.position, *c))
|
||||
.map(|(e, t, c)| (e, t.position, c.clone()))
|
||||
.collect();
|
||||
|
||||
if pairs_data.len() < 2 {
|
||||
@@ -34,7 +34,7 @@ pub fn detect_collisions(world: &World) -> Vec<ContactPoint> {
|
||||
let mut contacts = Vec::new();
|
||||
|
||||
let lookup = |entity: Entity| -> Option<(Vec3, Collider)> {
|
||||
pairs_data.iter().find(|(e, _, _)| *e == entity).map(|(_, p, c)| (*p, *c))
|
||||
pairs_data.iter().find(|(e, _, _)| *e == entity).map(|(_, p, c)| (*p, c.clone()))
|
||||
};
|
||||
|
||||
for (ea, eb) in broad_pairs {
|
||||
@@ -55,8 +55,9 @@ pub fn detect_collisions(world: &World) -> Vec<ContactPoint> {
|
||||
(Collider::Box { half_extents: ha }, Collider::Box { half_extents: hb }) => {
|
||||
narrow::box_vs_box(pos_a, *ha, pos_b, *hb)
|
||||
}
|
||||
// Any combination involving Capsule uses GJK/EPA
|
||||
(Collider::Capsule { .. }, _) | (_, Collider::Capsule { .. }) => {
|
||||
// Any combination involving Capsule or ConvexHull uses GJK/EPA
|
||||
(Collider::Capsule { .. }, _) | (_, Collider::Capsule { .. })
|
||||
| (Collider::ConvexHull(_), _) | (_, Collider::ConvexHull(_)) => {
|
||||
gjk::gjk_epa(&col_a, pos_a, &col_b, pos_b)
|
||||
}
|
||||
};
|
||||
|
||||
@@ -30,6 +30,9 @@ fn support(collider: &Collider, position: Vec3, direction: Vec3) -> Vec3 {
|
||||
}
|
||||
base + direction * (*radius / len)
|
||||
}
|
||||
Collider::ConvexHull(hull) => {
|
||||
position + hull.support(direction)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,25 @@ pub fn inertia_tensor(collider: &Collider, mass: f32) -> Vec3 {
|
||||
let iz = ix;
|
||||
Vec3::new(ix, iy, iz)
|
||||
}
|
||||
Collider::ConvexHull(hull) => {
|
||||
// Approximate using AABB of the hull vertices
|
||||
let mut min = hull.vertices[0];
|
||||
let mut max = min;
|
||||
for &v in &hull.vertices[1..] {
|
||||
min = Vec3::new(min.x.min(v.x), min.y.min(v.y), min.z.min(v.z));
|
||||
max = Vec3::new(max.x.max(v.x), max.y.max(v.y), max.z.max(v.z));
|
||||
}
|
||||
let size = max - min;
|
||||
let w = size.x;
|
||||
let h = size.y;
|
||||
let d = size.z;
|
||||
let factor = mass / 12.0;
|
||||
Vec3::new(
|
||||
factor * (h * h + d * d),
|
||||
factor * (w * w + d * d),
|
||||
factor * (w * w + h * h),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,9 +10,10 @@ pub mod integrator;
|
||||
pub mod solver;
|
||||
pub mod raycast;
|
||||
pub mod ccd;
|
||||
pub mod mesh_collider;
|
||||
|
||||
pub use bvh::BvhTree;
|
||||
pub use collider::Collider;
|
||||
pub use collider::{Collider, ConvexHull};
|
||||
pub use contact::ContactPoint;
|
||||
pub use collision::detect_collisions;
|
||||
pub use rigid_body::{RigidBody, PhysicsConfig};
|
||||
@@ -21,3 +22,4 @@ pub use solver::{resolve_collisions, physics_step};
|
||||
pub use raycast::{RayHit, raycast, raycast_all};
|
||||
pub use ray::ray_vs_triangle;
|
||||
pub use ccd::swept_sphere_vs_aabb;
|
||||
pub use mesh_collider::{MeshCollider, MeshHit, ray_vs_mesh, ray_vs_mesh_all};
|
||||
|
||||
143
crates/voltex_physics/src/mesh_collider.rs
Normal file
143
crates/voltex_physics/src/mesh_collider.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
use voltex_math::{Vec3, Ray};
|
||||
use crate::ray::ray_vs_triangle;
|
||||
|
||||
/// A triangle mesh collider defined by vertices and triangle indices.
///
/// Each entry of `indices` names three vertices (by index into `vertices`)
/// forming one triangle. Indices are not validated here; out-of-range
/// values panic inside the ray tests when dereferenced.
#[derive(Debug, Clone)]
pub struct MeshCollider {
    pub vertices: Vec<Vec3>,
    pub indices: Vec<[u32; 3]>,
}
|
||||
|
||||
/// Result of a ray-mesh intersection test.
#[derive(Debug, Clone, Copy)]
pub struct MeshHit {
    /// Parametric distance along the ray to the hit (as returned by
    /// `ray_vs_triangle`).
    pub distance: f32,
    /// Intersection point, computed as `ray.at(distance)`.
    pub point: Vec3,
    /// Triangle normal reported by `ray_vs_triangle`.
    pub normal: Vec3,
    /// Index into `MeshCollider::indices` of the triangle that was hit.
    pub triangle_index: usize,
}
|
||||
|
||||
/// Cast a ray against a triangle mesh. Returns the closest hit, if any.
|
||||
pub fn ray_vs_mesh(ray: &Ray, mesh: &MeshCollider) -> Option<MeshHit> {
|
||||
let mut closest: Option<MeshHit> = None;
|
||||
|
||||
for (i, tri) in mesh.indices.iter().enumerate() {
|
||||
let v0 = mesh.vertices[tri[0] as usize];
|
||||
let v1 = mesh.vertices[tri[1] as usize];
|
||||
let v2 = mesh.vertices[tri[2] as usize];
|
||||
|
||||
if let Some((t, normal)) = ray_vs_triangle(ray, v0, v1, v2) {
|
||||
let is_closer = closest.as_ref().map_or(true, |c| t < c.distance);
|
||||
if is_closer {
|
||||
let point = ray.at(t);
|
||||
closest = Some(MeshHit {
|
||||
distance: t,
|
||||
point,
|
||||
normal,
|
||||
triangle_index: i,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closest
|
||||
}
|
||||
|
||||
/// Cast a ray against a triangle mesh. Returns all hits sorted by distance.
|
||||
pub fn ray_vs_mesh_all(ray: &Ray, mesh: &MeshCollider) -> Vec<MeshHit> {
|
||||
let mut hits = Vec::new();
|
||||
|
||||
for (i, tri) in mesh.indices.iter().enumerate() {
|
||||
let v0 = mesh.vertices[tri[0] as usize];
|
||||
let v1 = mesh.vertices[tri[1] as usize];
|
||||
let v2 = mesh.vertices[tri[2] as usize];
|
||||
|
||||
if let Some((t, normal)) = ray_vs_triangle(ray, v0, v1, v2) {
|
||||
let point = ray.at(t);
|
||||
hits.push(MeshHit {
|
||||
distance: t,
|
||||
point,
|
||||
normal,
|
||||
triangle_index: i,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
hits.sort_by(|a, b| a.distance.partial_cmp(&b.distance).unwrap());
|
||||
hits
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Ray straight down from y=1 onto a flat quad at y=0 hits at distance 1.
    #[test]
    fn test_ray_vs_mesh_hit() {
        // Simple quad (2 triangles)
        let mesh = MeshCollider {
            vertices: vec![
                Vec3::new(-1.0, 0.0, -1.0),
                Vec3::new(1.0, 0.0, -1.0),
                Vec3::new(1.0, 0.0, 1.0),
                Vec3::new(-1.0, 0.0, 1.0),
            ],
            indices: vec![[0, 1, 2], [0, 2, 3]],
        };
        let ray = Ray::new(Vec3::new(0.0, 1.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
        let hit = ray_vs_mesh(&ray, &mesh);
        assert!(hit.is_some());
        assert!((hit.unwrap().distance - 1.0).abs() < 0.01);
    }

    // Ray origin at x=5 is outside the triangle's footprint -> no hit.
    #[test]
    fn test_ray_vs_mesh_miss() {
        let mesh = MeshCollider {
            vertices: vec![
                Vec3::new(-1.0, 0.0, -1.0),
                Vec3::new(1.0, 0.0, -1.0),
                Vec3::new(0.0, 0.0, 1.0),
            ],
            indices: vec![[0, 1, 2]],
        };
        let ray = Ray::new(Vec3::new(5.0, 1.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
        assert!(ray_vs_mesh(&ray, &mesh).is_none());
    }

    // With two stacked triangles, the closest (upper, y=2) one wins.
    #[test]
    fn test_ray_vs_mesh_closest() {
        // Two triangles at different heights
        let mesh = MeshCollider {
            vertices: vec![
                Vec3::new(-1.0, 0.0, -1.0),
                Vec3::new(1.0, 0.0, -1.0),
                Vec3::new(0.0, 0.0, 1.0),
                Vec3::new(-1.0, 2.0, -1.0),
                Vec3::new(1.0, 2.0, -1.0),
                Vec3::new(0.0, 2.0, 1.0),
            ],
            indices: vec![[0, 1, 2], [3, 4, 5]],
        };
        let ray = Ray::new(Vec3::new(0.0, 5.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
        let hit = ray_vs_mesh(&ray, &mesh).unwrap();
        assert!((hit.distance - 3.0).abs() < 0.01); // hits y=2 first
    }

    // ray_vs_mesh_all reports both intersections, nearest first.
    #[test]
    fn test_ray_vs_mesh_all() {
        let mesh = MeshCollider {
            vertices: vec![
                Vec3::new(-1.0, 0.0, -1.0),
                Vec3::new(1.0, 0.0, -1.0),
                Vec3::new(0.0, 0.0, 1.0),
                Vec3::new(-1.0, 2.0, -1.0),
                Vec3::new(1.0, 2.0, -1.0),
                Vec3::new(0.0, 2.0, 1.0),
            ],
            indices: vec![[0, 1, 2], [3, 4, 5]],
        };
        let ray = Ray::new(Vec3::new(0.0, 5.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
        let hits = ray_vs_mesh_all(&ray, &mesh);
        assert_eq!(hits.len(), 2);
        assert!(hits[0].distance < hits[1].distance); // sorted
    }
}
|
||||
@@ -17,7 +17,7 @@ pub fn raycast(world: &World, ray: &Ray, max_dist: f32) -> Option<RayHit> {
|
||||
let entities: Vec<(Entity, Vec3, Collider)> = world
|
||||
.query2::<Transform, Collider>()
|
||||
.into_iter()
|
||||
.map(|(e, t, c)| (e, t.position, *c))
|
||||
.map(|(e, t, c)| (e, t.position, c.clone()))
|
||||
.collect();
|
||||
|
||||
if entities.is_empty() {
|
||||
@@ -53,6 +53,11 @@ pub fn raycast(world: &World, ray: &Ray, max_dist: f32) -> Option<RayHit> {
|
||||
Collider::Capsule { radius, half_height } => {
|
||||
ray_tests::ray_vs_capsule(ray, *pos, *radius, *half_height)
|
||||
}
|
||||
Collider::ConvexHull(_) => {
|
||||
// Use AABB test as approximation for convex hull raycasting
|
||||
let aabb = collider.aabb(*pos);
|
||||
ray_tests::ray_vs_aabb(ray, &aabb).map(|t| (t, Vec3::Y))
|
||||
}
|
||||
};
|
||||
|
||||
if let Some((t, normal)) = result {
|
||||
@@ -77,7 +82,7 @@ pub fn raycast_all(world: &World, ray: &Ray, max_dist: f32) -> Vec<RayHit> {
|
||||
let entities: Vec<(Entity, Vec3, Collider)> = world
|
||||
.query2::<Transform, Collider>()
|
||||
.into_iter()
|
||||
.map(|(e, t, c)| (e, t.position, *c))
|
||||
.map(|(e, t, c)| (e, t.position, c.clone()))
|
||||
.collect();
|
||||
|
||||
if entities.is_empty() {
|
||||
@@ -106,6 +111,11 @@ pub fn raycast_all(world: &World, ray: &Ray, max_dist: f32) -> Vec<RayHit> {
|
||||
Collider::Capsule { radius, half_height } => {
|
||||
ray_tests::ray_vs_capsule(ray, *pos, *radius, *half_height)
|
||||
}
|
||||
Collider::ConvexHull(_) => {
|
||||
// Use AABB test as approximation for convex hull raycasting
|
||||
let aabb = collider.aabb(*pos);
|
||||
ray_tests::ray_vs_aabb(ray, &aabb).map(|t| (t, Vec3::Y))
|
||||
}
|
||||
};
|
||||
|
||||
if let Some((t, normal)) = result {
|
||||
|
||||
@@ -23,8 +23,8 @@ pub fn resolve_collisions(world: &mut World, contacts: &[ContactPoint], iteratio
|
||||
for contact in contacts {
|
||||
let rb_a = world.get::<RigidBody>(contact.entity_a).copied();
|
||||
let rb_b = world.get::<RigidBody>(contact.entity_b).copied();
|
||||
let col_a = world.get::<Collider>(contact.entity_a).copied();
|
||||
let col_b = world.get::<Collider>(contact.entity_b).copied();
|
||||
let col_a = world.get::<Collider>(contact.entity_a).cloned();
|
||||
let col_b = world.get::<Collider>(contact.entity_b).cloned();
|
||||
let pos_a = world.get::<Transform>(contact.entity_a).map(|t| t.position);
|
||||
let pos_b = world.get::<Transform>(contact.entity_b).map(|t| t.position);
|
||||
|
||||
@@ -205,7 +205,7 @@ fn apply_ccd(world: &mut World, config: &PhysicsConfig) {
|
||||
.query3::<Transform, RigidBody, Collider>()
|
||||
.into_iter()
|
||||
.filter(|(_, _, rb, _)| !rb.is_static() && !rb.is_sleeping)
|
||||
.map(|(e, t, rb, c)| (e, t.position, rb.velocity, *c))
|
||||
.map(|(e, t, rb, c)| (e, t.position, rb.velocity, c.clone()))
|
||||
.collect();
|
||||
|
||||
let all_colliders: Vec<(Entity, voltex_math::AABB)> = world
|
||||
@@ -222,6 +222,10 @@ fn apply_ccd(world: &mut World, config: &PhysicsConfig) {
|
||||
Collider::Sphere { radius } => *radius,
|
||||
Collider::Box { half_extents } => half_extents.x.min(half_extents.y).min(half_extents.z),
|
||||
Collider::Capsule { radius, .. } => *radius,
|
||||
Collider::ConvexHull(hull) => {
|
||||
// Use minimum distance from origin to any vertex as approximate radius
|
||||
hull.vertices.iter().map(|v| v.length()).fold(f32::MAX, f32::min)
|
||||
}
|
||||
};
|
||||
|
||||
// Only apply CCD if displacement > collider radius
|
||||
|
||||
243
crates/voltex_renderer/src/auto_exposure.rs
Normal file
243
crates/voltex_renderer/src/auto_exposure.rs
Normal file
@@ -0,0 +1,243 @@
|
||||
/// Pure CPU exposure calculation logic (testable).
///
/// Maps the scene's average log-luminance to a target exposure using the
/// photographic key-value formula, clamped to `[min_exp, max_exp]`.
pub fn calculate_target_exposure(
    avg_log_luminance: f32,
    key_value: f32,
    min_exp: f32,
    max_exp: f32,
) -> f32 {
    // Back to linear luminance; the floor prevents a division by zero on a
    // fully black scene.
    let linear_lum = avg_log_luminance.exp().max(0.0001);
    (key_value / linear_lum).clamp(min_exp, max_exp)
}
|
||||
|
||||
/// Smooth adaptation over time.
///
/// Frame-rate-independent exponential approach toward `target`: the blend
/// weight `1 - e^(-dt*speed)` stays within [0, 1) for non-negative `dt`.
pub fn adapt_exposure(current: f32, target: f32, dt: f32, speed: f32) -> f32 {
    let blend = 1.0 - (-dt * speed).exp();
    current + (target - current) * blend
}
|
||||
|
||||
/// GPU-side auto exposure compute + readback.
pub struct AutoExposure {
    // Compute pipeline running auto_exposure.wgsl over the HDR target.
    compute_pipeline: wgpu::ComputePipeline,
    bind_group_layout: wgpu::BindGroupLayout,
    // 8-byte storage buffer the shader atomically accumulates into.
    result_buffer: wgpu::Buffer,
    // MAP_READ destination for CPU readback of result_buffer.
    staging_buffer: wgpu::Buffer,
    /// Current (smoothed) exposure value.
    pub exposure: f32,
    /// Lower clamp applied to the adapted exposure.
    pub min_exposure: f32,
    /// Upper clamp applied to the adapted exposure.
    pub max_exposure: f32,
    /// Adaptation rate passed to `adapt_exposure`.
    pub adaptation_speed: f32,
    /// Photographic key value (0.18 corresponds to middle gray).
    pub key_value: f32,
    // True after dispatch() queued a copy into staging_buffer and before
    // update_exposure() consumed it.
    pending_read: bool,
}
|
||||
|
||||
impl AutoExposure {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("Auto Exposure Compute"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("auto_exposure.wgsl").into()),
|
||||
});
|
||||
|
||||
let bind_group_layout =
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("Auto Exposure BGL"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: false },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: false },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("Auto Exposure PL"),
|
||||
bind_group_layouts: &[&bind_group_layout],
|
||||
immediate_size: 0,
|
||||
});
|
||||
|
||||
let compute_pipeline =
|
||||
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: Some("Auto Exposure Pipeline"),
|
||||
layout: Some(&pipeline_layout),
|
||||
module: &shader,
|
||||
entry_point: Some("main"),
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(),
|
||||
cache: None,
|
||||
});
|
||||
|
||||
let result_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Auto Exposure Result"),
|
||||
size: 8,
|
||||
usage: wgpu::BufferUsages::STORAGE
|
||||
| wgpu::BufferUsages::COPY_SRC
|
||||
| wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Auto Exposure Staging"),
|
||||
size: 8,
|
||||
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
AutoExposure {
|
||||
compute_pipeline,
|
||||
bind_group_layout,
|
||||
result_buffer,
|
||||
staging_buffer,
|
||||
exposure: 1.0,
|
||||
min_exposure: 0.1,
|
||||
max_exposure: 10.0,
|
||||
adaptation_speed: 2.0,
|
||||
key_value: 0.18,
|
||||
pending_read: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Dispatch compute shader to calculate luminance. Call once per frame.
|
||||
pub fn dispatch(
|
||||
&mut self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
hdr_view: &wgpu::TextureView,
|
||||
hdr_width: u32,
|
||||
hdr_height: u32,
|
||||
) {
|
||||
// Clear result buffer
|
||||
queue.write_buffer(&self.result_buffer, 0, &[0u8; 8]);
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Auto Exposure BG"),
|
||||
layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(hdr_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: self.result_buffer.as_entire_binding(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
{
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
|
||||
label: Some("Auto Exposure Pass"),
|
||||
timestamp_writes: None,
|
||||
});
|
||||
cpass.set_pipeline(&self.compute_pipeline);
|
||||
cpass.set_bind_group(0, &bind_group, &[]);
|
||||
let wg_x = (hdr_width + 15) / 16;
|
||||
let wg_y = (hdr_height + 15) / 16;
|
||||
cpass.dispatch_workgroups(wg_x, wg_y, 1);
|
||||
}
|
||||
|
||||
// Copy result to staging for CPU readback
|
||||
encoder.copy_buffer_to_buffer(&self.result_buffer, 0, &self.staging_buffer, 0, 8);
|
||||
self.pending_read = true;
|
||||
}
|
||||
|
||||
/// Read back luminance result and update exposure. Call after queue.submit().
|
||||
/// Returns true if exposure was updated.
|
||||
pub fn update_exposure(&mut self, _dt: f32) -> bool {
|
||||
if !self.pending_read {
|
||||
return false;
|
||||
}
|
||||
self.pending_read = false;
|
||||
|
||||
let slice = self.staging_buffer.slice(..);
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
slice.map_async(wgpu::MapMode::Read, move |result| {
|
||||
let _ = tx.send(result);
|
||||
});
|
||||
|
||||
// The caller must poll the device for the map to complete.
|
||||
// For a full integration, use async or poll in the render loop.
|
||||
// For now, return false — use set_average_luminance() for CPU-side updates.
|
||||
let _ = rx;
|
||||
false
|
||||
}
|
||||
|
||||
/// Simple CPU-only exposure update without GPU readback.
|
||||
/// Use when you have a luminance estimate from other means.
|
||||
pub fn set_average_luminance(&mut self, avg_log_lum: f32, dt: f32) {
|
||||
let target = calculate_target_exposure(
|
||||
avg_log_lum,
|
||||
self.key_value,
|
||||
self.min_exposure,
|
||||
self.max_exposure,
|
||||
);
|
||||
self.exposure = adapt_exposure(self.exposure, target, dt, self.adaptation_speed);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Scene luminance equal to the key value should request exposure 1.0.
    #[test]
    fn test_calculate_target_exposure() {
        // avg_log_lum = ln(0.18) -> avg_lum = 0.18 -> target = 0.18/0.18 = 1.0
        let avg_log_lum = 0.18_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 1.0).abs() < 0.01, "target={}", target);
    }

    #[test]
    fn test_target_exposure_bright_scene() {
        // Bright scene: avg_lum = 2.0 -> target = 0.18/2.0 = 0.09 -> clamped to min 0.1
        let avg_log_lum = 2.0_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 0.1).abs() < 0.01);
    }

    #[test]
    fn test_target_exposure_dark_scene() {
        // Dark scene: avg_lum = 0.001 -> target = 0.18/0.001 = 180 -> clamped to max 10.0
        let avg_log_lum = 0.001_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 10.0).abs() < 0.01);
    }

    #[test]
    fn test_adapt_exposure_no_time() {
        let result = adapt_exposure(1.0, 5.0, 0.0, 2.0);
        assert!((result - 1.0).abs() < 0.01); // dt=0 -> no change
    }

    // Repeated frame-sized steps must settle near the target value.
    #[test]
    fn test_adapt_exposure_converges() {
        let mut exp = 1.0;
        for _ in 0..100 {
            exp = adapt_exposure(exp, 5.0, 0.016, 2.0); // 60fps
        }
        assert!(
            (exp - 5.0).abs() < 0.2,
            "should converge to 5.0, got {}",
            exp
        );
    }

    #[test]
    fn test_adapt_exposure_large_dt() {
        let result = adapt_exposure(1.0, 5.0, 100.0, 2.0);
        assert!((result - 5.0).abs() < 0.01); // large dt -> near target
    }
}
|
||||
18
crates/voltex_renderer/src/auto_exposure.wgsl
Normal file
18
crates/voltex_renderer/src/auto_exposure.wgsl
Normal file
@@ -0,0 +1,18 @@
|
||||
// Auto-exposure reduction: accumulates fixed-point log-luminance over the
// whole HDR frame into a two-slot atomic buffer.
//   result[0] = sum of log-luminance, fixed-point (x1000), two's complement
//   result[1] = number of pixels accumulated
@group(0) @binding(0) var hdr_texture: texture_2d<f32>;
@group(0) @binding(1) var<storage, read_write> result: array<atomic<u32>>;

@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(hdr_texture);
    // Guard the partial workgroups along the right/bottom edges.
    if (gid.x >= dims.x || gid.y >= dims.y) {
        return;
    }

    let color = textureLoad(hdr_texture, vec2<i32>(gid.xy), 0);
    // Rec. 709 luma weights.
    let lum = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
    // Floor at 0.0001 so log() never sees a zero/negative input.
    let log_lum = log(max(lum, 0.0001));

    // Signed fixed-point (x1000) summed via u32 wrapping arithmetic: the
    // bitcast preserves two's complement, so the CPU can bitcast the total
    // back to i32. NOTE(review): the sum can wrap for very large frames —
    // confirm the accumulated range fits in i32.
    let fixed = i32(log_lum * 1000.0);
    atomicAdd(&result[0], bitcast<u32>(fixed));
    atomicAdd(&result[1], 1u);
}
|
||||
156
crates/voltex_renderer/src/bilateral_blur.rs
Normal file
156
crates/voltex_renderer/src/bilateral_blur.rs
Normal file
@@ -0,0 +1,156 @@
|
||||
/// Edge-preserving (bilateral) blur pass driven by the depth and normal
/// buffers; kernel lives in `bilateral_blur.wgsl`.
pub struct BilateralBlur {
    // Compute pipeline running the bilateral kernel.
    pipeline: wgpu::ComputePipeline,
    // Reused every dispatch to bind input/depth/normal/output views.
    bind_group_layout: wgpu::BindGroupLayout,
}
|
||||
|
||||
impl BilateralBlur {
    /// Build the compute pipeline and its bind-group layout.
    ///
    /// Bindings: 0 = input color, 1 = depth, 2 = normals,
    /// 3 = output storage texture (rgba16float).
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Bilateral Blur Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("bilateral_blur.wgsl").into()),
        });

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Bilateral Blur BGL"),
            entries: &[
                // binding 0: input color (non-filterable float; shader reads via textureLoad)
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                    },
                    count: None,
                },
                // binding 1: scene depth
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Depth,
                    },
                    count: None,
                },
                // binding 2: world-space normals
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                    },
                    count: None,
                },
                // binding 3: blurred output (write-only storage texture)
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture {
                        access: wgpu::StorageTextureAccess::WriteOnly,
                        format: wgpu::TextureFormat::Rgba16Float,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
            ],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Bilateral Blur PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Bilateral Blur Pipeline"),
            layout: Some(&pipeline_layout),
            module: &shader,
            entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(),
            cache: None,
        });

        BilateralBlur { pipeline, bind_group_layout }
    }

    /// Record one full-screen bilateral blur dispatch into `encoder`.
    ///
    /// Workgroups are 16x16 (matching the shader), hence the rounded-up
    /// dispatch counts. A fresh bind group is created per call.
    pub fn dispatch(
        &self,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        input_view: &wgpu::TextureView,
        depth_view: &wgpu::TextureView,
        normal_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
        width: u32,
        height: u32,
    ) {
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Bilateral Blur BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(input_view) },
                wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(depth_view) },
                wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(normal_view) },
                wgpu::BindGroupEntry { binding: 3, resource: wgpu::BindingResource::TextureView(output_view) },
            ],
        });

        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Bilateral Blur Pass"),
            timestamp_writes: None,
        });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        // Ceil-divide so edge pixels are covered by partial workgroups.
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
|
||||
|
||||
/// Pure CPU bilateral weight calculation (for testing).
///
/// Combines three independent falloffs — spatial Gaussian, exponential
/// depth penalty, and a power falloff on normal agreement — into a single
/// multiplicative kernel weight.
pub fn bilateral_weight(
    spatial_dist: f32,
    depth_diff: f32,
    normal_dot: f32,
    sigma_spatial: f32,
    sigma_depth: f32,
    sigma_normal: f32,
) -> f32 {
    // Gaussian falloff in screen space.
    let spatial_term =
        f32::exp(-(spatial_dist * spatial_dist) / (2.0 * sigma_spatial * sigma_spatial));
    // Exponential penalty on depth discontinuities.
    let depth_term = f32::exp(-depth_diff.abs() / sigma_depth);
    // Power falloff on normal disagreement; back-facing clamps to zero.
    let normal_term = normal_dot.max(0.0).powf(sigma_normal);
    spatial_term * depth_term * normal_term
}
|
||||
|
||||
// Pure-CPU tests of the bilateral weight formula; no GPU required.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bilateral_weight_center() {
        // At center: dist=0, depth_diff=0, normal_dot=1
        let w = bilateral_weight(0.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        assert!((w - 1.0).abs() < 0.01); // all factors = 1
    }

    #[test]
    fn test_bilateral_weight_depth_edge() {
        // Large depth difference -> low weight
        let w = bilateral_weight(0.0, 10.0, 1.0, 2.0, 0.1, 16.0);
        assert!(w < 0.01, "large depth diff should give low weight: {}", w);
    }

    #[test]
    fn test_bilateral_weight_normal_edge() {
        // Perpendicular normals -> low weight
        let w = bilateral_weight(0.0, 0.0, 0.0, 2.0, 0.1, 16.0);
        assert!(w < 0.01, "perpendicular normals should give low weight: {}", w);
    }

    #[test]
    fn test_bilateral_weight_distance() {
        // Far spatial distance -> lower weight than near
        let w_near = bilateral_weight(1.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        let w_far = bilateral_weight(4.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        assert!(w_near > w_far);
    }
}
|
||||
58
crates/voltex_renderer/src/bilateral_blur.wgsl
Normal file
58
crates/voltex_renderer/src/bilateral_blur.wgsl
Normal file
@@ -0,0 +1,58 @@
|
||||
@group(0) @binding(0) var input_tex: texture_2d<f32>;
|
||||
@group(0) @binding(1) var depth_tex: texture_depth_2d;
|
||||
@group(0) @binding(2) var normal_tex: texture_2d<f32>;
|
||||
@group(0) @binding(3) var output_tex: texture_storage_2d<rgba16float, write>;
|
||||
|
||||
const KERNEL_RADIUS: i32 = 2;
|
||||
const SIGMA_SPATIAL: f32 = 2.0;
|
||||
const SIGMA_DEPTH: f32 = 0.1;
|
||||
const SIGMA_NORMAL: f32 = 16.0;
|
||||
|
||||
fn gaussian(x: f32, sigma: f32) -> f32 {
|
||||
return exp(-(x * x) / (2.0 * sigma * sigma));
|
||||
}
|
||||
|
||||
@compute @workgroup_size(16, 16)
|
||||
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
|
||||
let dims = textureDimensions(input_tex);
|
||||
if (gid.x >= dims.x || gid.y >= dims.y) { return; }
|
||||
|
||||
let center = vec2<i32>(gid.xy);
|
||||
let center_depth = textureLoad(depth_tex, center, 0);
|
||||
let center_normal = textureLoad(normal_tex, center, 0).xyz;
|
||||
|
||||
var sum = vec4<f32>(0.0);
|
||||
var weight_sum = 0.0;
|
||||
|
||||
for (var dy = -KERNEL_RADIUS; dy <= KERNEL_RADIUS; dy++) {
|
||||
for (var dx = -KERNEL_RADIUS; dx <= KERNEL_RADIUS; dx++) {
|
||||
let offset = vec2<i32>(dx, dy);
|
||||
let sample_pos = center + offset;
|
||||
|
||||
if (sample_pos.x < 0 || sample_pos.y < 0 ||
|
||||
sample_pos.x >= i32(dims.x) || sample_pos.y >= i32(dims.y)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let spatial_dist = sqrt(f32(dx * dx + dy * dy));
|
||||
let w_spatial = gaussian(spatial_dist, SIGMA_SPATIAL);
|
||||
|
||||
let sample_depth = textureLoad(depth_tex, sample_pos, 0);
|
||||
let depth_diff = abs(center_depth - sample_depth);
|
||||
let w_depth = exp(-depth_diff / SIGMA_DEPTH);
|
||||
|
||||
let sample_normal = textureLoad(normal_tex, sample_pos, 0).xyz;
|
||||
let n_dot = max(dot(center_normal, sample_normal), 0.0);
|
||||
let w_normal = pow(n_dot, SIGMA_NORMAL);
|
||||
|
||||
let weight = w_spatial * w_depth * w_normal;
|
||||
let sample_color = textureLoad(input_tex, sample_pos, 0);
|
||||
|
||||
sum += sample_color * weight;
|
||||
weight_sum += weight;
|
||||
}
|
||||
}
|
||||
|
||||
let result = select(vec4<f32>(0.0), sum / weight_sum, weight_sum > 0.0);
|
||||
textureStore(output_tex, vec2<i32>(gid.xy), result);
|
||||
}
|
||||
140
crates/voltex_renderer/src/dof.rs
Normal file
140
crates/voltex_renderer/src/dof.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Pod, Zeroable)]
|
||||
pub struct DofParams {
|
||||
pub focus_distance: f32,
|
||||
pub focus_range: f32,
|
||||
pub max_blur: f32,
|
||||
pub _pad: f32,
|
||||
}
|
||||
|
||||
impl DofParams {
|
||||
pub fn new() -> Self {
|
||||
DofParams { focus_distance: 5.0, focus_range: 3.0, max_blur: 5.0, _pad: 0.0 }
|
||||
}
|
||||
}
|
||||
|
||||
/// Depth-of-field post-process: a disc blur whose radius follows the
/// circle of confusion computed from the depth buffer (`dof.wgsl`).
pub struct DepthOfField {
    pipeline: wgpu::ComputePipeline,
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer holding the current `DofParams`; rewritten on each dispatch.
    params_buffer: wgpu::Buffer,
    // Public toggle; `dispatch` itself does not check it.
    pub enabled: bool,
}
|
||||
|
||||
impl DepthOfField {
    /// Build the DOF compute pipeline, bind-group layout, and the params
    /// uniform buffer. Starts enabled.
    ///
    /// Bindings: 0 = HDR color, 1 = depth, 2 = output storage texture
    /// (rgba16float), 3 = `DofParams` uniform.
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("DOF Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("dof.wgsl").into()),
        });

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("DOF BGL"),
            entries: &[
                // binding 0: input HDR color (read via textureLoad -> non-filterable)
                wgpu::BindGroupLayoutEntry {
                    binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 1: scene depth
                wgpu::BindGroupLayoutEntry {
                    binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Depth },
                    count: None,
                },
                // binding 2: blurred output (write-only storage texture)
                wgpu::BindGroupLayoutEntry {
                    binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: wgpu::TextureFormat::Rgba16Float, view_dimension: wgpu::TextureViewDimension::D2 },
                    count: None,
                },
                // binding 3: DofParams uniform
                wgpu::BindGroupLayoutEntry {
                    binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
                    count: None,
                },
            ],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("DOF PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("DOF Pipeline"), layout: Some(&pipeline_layout),
            module: &shader, entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
        });

        // Uniform buffer sized to DofParams; updated via write_buffer in dispatch().
        let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("DOF Params"),
            size: std::mem::size_of::<DofParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        DepthOfField { pipeline, bind_group_layout, params_buffer, enabled: true }
    }

    /// Upload `params` and record one full-screen DOF dispatch (16x16
    /// workgroups, rounded up).
    ///
    /// NOTE(review): does not consult `self.enabled` — presumably callers
    /// gate the pass on it; confirm at the call sites.
    pub fn dispatch(
        &self, device: &wgpu::Device, queue: &wgpu::Queue, encoder: &mut wgpu::CommandEncoder,
        color_view: &wgpu::TextureView, depth_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView, params: &DofParams, width: u32, height: u32,
    ) {
        queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("DOF BG"), layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(color_view) },
                wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(depth_view) },
                wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(output_view) },
                wgpu::BindGroupEntry { binding: 3, resource: self.params_buffer.as_entire_binding() },
            ],
        });
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("DOF Pass"), timestamp_writes: None });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        // Ceil-divide so edge pixels are covered by partial workgroups.
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
|
||||
|
||||
/// CPU circle-of-confusion calculation (for testing).
///
/// Returns the blur radius for a pixel at `depth`: 0 at the focal plane,
/// ramping linearly to `max_blur` once the distance from the focal plane
/// reaches `focus_range`.
pub fn circle_of_confusion(depth: f32, focus_distance: f32, focus_range: f32, max_blur: f32) -> f32 {
    // Normalized distance from the focal plane, saturated to [0, 1].
    let t = ((depth - focus_distance).abs() / focus_range).clamp(0.0, 1.0);
    max_blur * t
}
|
||||
|
||||
// CPU tests of the circle-of-confusion math and parameter defaults.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_coc_in_focus() {
        let coc = circle_of_confusion(5.0, 5.0, 3.0, 5.0);
        assert!((coc - 0.0).abs() < 1e-6); // at focus distance
    }

    #[test]
    fn test_coc_near() {
        let coc = circle_of_confusion(2.0, 5.0, 3.0, 5.0);
        assert!((coc - 5.0).abs() < 1e-6); // diff=3, range=3 -> full blur
    }

    #[test]
    fn test_coc_far() {
        let coc = circle_of_confusion(8.0, 5.0, 3.0, 5.0);
        assert!((coc - 5.0).abs() < 1e-6); // diff=3, range=3 -> full blur
    }

    #[test]
    fn test_coc_partial() {
        let coc = circle_of_confusion(6.5, 5.0, 3.0, 5.0);
        assert!((coc - 2.5).abs() < 1e-6); // diff=1.5, range=3 -> 0.5 * 5 = 2.5
    }

    #[test]
    fn test_params_default() {
        let p = DofParams::new();
        assert!((p.focus_distance - 5.0).abs() < 1e-6);
        assert!((p.focus_range - 3.0).abs() < 1e-6);
    }
}
|
||||
55
crates/voltex_renderer/src/dof.wgsl
Normal file
55
crates/voltex_renderer/src/dof.wgsl
Normal file
@@ -0,0 +1,55 @@
|
||||
// Depth-of-field: per-pixel disc blur whose radius is the circle of
// confusion derived from scene depth.
// Layout must match the Rust `DofParams` in dof.rs (16-byte uniform).
struct DofParams {
    focus_distance: f32,
    focus_range: f32,
    max_blur: f32,
    _pad: f32,
};

@group(0) @binding(0) var color_tex: texture_2d<f32>;
@group(0) @binding(1) var depth_tex: texture_depth_2d;
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(3) var<uniform> params: DofParams;

// Blur radius in pixels: 0 at the focal plane, ramping linearly to
// max_blur once |depth - focus_distance| >= focus_range.
fn circle_of_confusion(depth: f32) -> f32 {
    let diff = abs(depth - params.focus_distance);
    let coc = clamp(diff / params.focus_range, 0.0, 1.0) * params.max_blur;
    return coc;
}

@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(color_tex);
    // Guard partial workgroups at the frame edges.
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }

    let pos = vec2<i32>(gid.xy);
    let depth = textureLoad(depth_tex, pos, 0);
    let coc = circle_of_confusion(depth);

    if (coc < 0.5) {
        // In focus — no blur
        textureStore(output_tex, pos, textureLoad(color_tex, pos, 0));
        return;
    }

    // Disc blur with radius = coc
    let radius = i32(coc);
    var color = vec4<f32>(0.0);
    var weight = 0.0;

    for (var dy = -radius; dy <= radius; dy++) {
        for (var dx = -radius; dx <= radius; dx++) {
            let dist = sqrt(f32(dx * dx + dy * dy));
            // Keep the kernel circular rather than square.
            if (dist > coc) { continue; }

            // Clamp to the frame edge so border pixels still blur.
            let sample_pos = pos + vec2<i32>(dx, dy);
            let clamped = clamp(sample_pos, vec2<i32>(0), vec2<i32>(dims) - 1);
            let sample_color = textureLoad(color_tex, clamped, 0);
            // Triangle falloff toward the disc edge (+0.001 avoids div-by-zero).
            let w = 1.0 - dist / (coc + 0.001);
            color += sample_color * w;
            weight += w;
        }
    }

    // select() guards the degenerate zero-weight case (pass input through).
    let result = select(textureLoad(color_tex, pos, 0), color / weight, weight > 0.0);
    textureStore(output_tex, pos, result);
}
|
||||
184
crates/voltex_renderer/src/forward_pass.rs
Normal file
184
crates/voltex_renderer/src/forward_pass.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use crate::vertex::MeshVertex;
|
||||
use crate::hdr::HDR_FORMAT;
|
||||
use crate::gpu::DEPTH_FORMAT;
|
||||
use crate::mesh::Mesh;
|
||||
|
||||
/// Forward render pass for transparent geometry: alpha-blended into the
/// HDR target, depth-tested against (but not writing) the opaque depth.
pub struct ForwardPass {
    pipeline: wgpu::RenderPipeline,
}
|
||||
|
||||
impl ForwardPass {
    /// Build the forward (transparency) render pipeline.
    ///
    /// Group 0 = camera + light uniforms, group 1 = diffuse texture +
    /// sampler (layouts supplied by the caller and shared with other passes).
    pub fn new(
        device: &wgpu::Device,
        camera_light_layout: &wgpu::BindGroupLayout,
        texture_layout: &wgpu::BindGroupLayout,
    ) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Forward Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("forward_shader.wgsl").into()),
        });

        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Forward Pipeline Layout"),
            bind_group_layouts: &[camera_light_layout, texture_layout],
            immediate_size: 0,
        });

        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Forward Pipeline"),
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[MeshVertex::LAYOUT],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format: HDR_FORMAT,
                    // Classic "over" blending: src-alpha for color,
                    // One/OneMinusSrcAlpha accumulation for alpha.
                    blend: Some(wgpu::BlendState {
                        color: wgpu::BlendComponent {
                            src_factor: wgpu::BlendFactor::SrcAlpha,
                            dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                            operation: wgpu::BlendOperation::Add,
                        },
                        alpha: wgpu::BlendComponent {
                            src_factor: wgpu::BlendFactor::One,
                            dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                            operation: wgpu::BlendOperation::Add,
                        },
                    }),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: None, // No culling for transparent objects (see both sides)
                polygon_mode: wgpu::PolygonMode::Fill,
                unclipped_depth: false,
                conservative: false,
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: DEPTH_FORMAT,
                depth_write_enabled: false, // Don't write depth (preserve opaque depth)
                depth_compare: wgpu::CompareFunction::LessEqual,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview_mask: None,
            cache: None,
        });

        ForwardPass { pipeline }
    }

    /// Record the transparency pass: loads (never clears) both the HDR
    /// color and depth attachments, then draws each mesh with the shared
    /// bind groups.
    ///
    /// NOTE(review): meshes are drawn in slice order — callers presumably
    /// pre-sort back-to-front (see `sort_transparent_back_to_front`); confirm.
    pub fn render<'a>(
        &'a self,
        encoder: &'a mut wgpu::CommandEncoder,
        hdr_view: &wgpu::TextureView,
        depth_view: &wgpu::TextureView,
        camera_light_bg: &'a wgpu::BindGroup,
        texture_bg: &'a wgpu::BindGroup,
        meshes: &'a [&Mesh],
    ) {
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("Forward Transparency Pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: hdr_view,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    // Load: composite on top of the opaque pass output.
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: depth_view,
                depth_ops: Some(wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                }),
                stencil_ops: None,
            }),
            occlusion_query_set: None,
            timestamp_writes: None,
            multiview_mask: None,
        });

        rpass.set_pipeline(&self.pipeline);
        rpass.set_bind_group(0, camera_light_bg, &[]);
        rpass.set_bind_group(1, texture_bg, &[]);

        for mesh in meshes {
            rpass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
            rpass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
            rpass.draw_indexed(0..mesh.num_indices, 0, 0..1);
        }
    }
}
|
||||
|
||||
/// Sort transparent draw items back-to-front by distance from the camera,
/// so alpha blending composites correctly (farthest drawn first).
///
/// Takes a mutable slice (Clippy `ptr_arg`): existing `&mut Vec` callers
/// continue to work via deref coercion. Items are
/// `(mesh_index, world_center)` pairs.
pub fn sort_transparent_back_to_front(
    items: &mut [(usize, [f32; 3])], // (index, center_position)
    camera_pos: [f32; 3],
) {
    items.sort_by(|a, b| {
        let da = dist_sq(a.1, camera_pos);
        let db = dist_sq(b.1, camera_pos);
        // total_cmp provides a total order without the unwrap fallback,
        // handling NaN distances deterministically; reversed for far-first.
        db.total_cmp(&da)
    });
}

/// Squared Euclidean distance — sqrt omitted since only ordering matters.
fn dist_sq(a: [f32; 3], b: [f32; 3]) -> f32 {
    let dx = a[0] - b[0];
    let dy = a[1] - b[1];
    let dz = a[2] - b[2];
    dx * dx + dy * dy + dz * dz
}
|
||||
|
||||
// Tests for the CPU-side transparency sort; no GPU required.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sort_back_to_front() {
        let mut items = vec![
            (0, [1.0, 0.0, 0.0]), // close
            (1, [10.0, 0.0, 0.0]), // far
            (2, [5.0, 0.0, 0.0]), // mid
        ];
        sort_transparent_back_to_front(&mut items, [0.0, 0.0, 0.0]);
        assert_eq!(items[0].0, 1); // farthest first
        assert_eq!(items[1].0, 2);
        assert_eq!(items[2].0, 0); // closest last
    }

    #[test]
    fn test_sort_equal_distance() {
        let mut items = vec![
            (0, [1.0, 0.0, 0.0]),
            (1, [0.0, 1.0, 0.0]),
            (2, [0.0, 0.0, 1.0]),
        ];
        // All at distance 1.0 from origin — should not crash
        sort_transparent_back_to_front(&mut items, [0.0, 0.0, 0.0]);
        assert_eq!(items.len(), 3);
    }

    #[test]
    fn test_sort_empty() {
        let mut items: Vec<(usize, [f32; 3])> = vec![];
        sort_transparent_back_to_front(&mut items, [0.0, 0.0, 0.0]);
        assert!(items.is_empty());
    }
}
|
||||
67
crates/voltex_renderer/src/forward_shader.wgsl
Normal file
67
crates/voltex_renderer/src/forward_shader.wgsl
Normal file
@@ -0,0 +1,67 @@
|
||||
// Forward transparency shader: single directional light, Blinn-Phong
// shading, per-object alpha carried in the camera uniform.

struct CameraUniform {
    view_proj: mat4x4<f32>,
    model: mat4x4<f32>,
    camera_pos: vec3<f32>,
    // Per-object opacity, packed into the vec3's 4th slot.
    alpha: f32,
};

struct LightUniform {
    direction: vec3<f32>,
    _pad0: f32,
    color: vec3<f32>,
    ambient_strength: f32,
};

@group(0) @binding(0) var<uniform> camera: CameraUniform;
@group(0) @binding(1) var<uniform> light: LightUniform;

@group(1) @binding(0) var t_diffuse: texture_2d<f32>;
@group(1) @binding(1) var s_diffuse: sampler;

struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) uv: vec2<f32>,
    @location(3) tangent: vec4<f32>,
};

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) world_normal: vec3<f32>,
    @location(1) world_pos: vec3<f32>,
    @location(2) uv: vec2<f32>,
};

@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
    var out: VertexOutput;
    let world_pos = camera.model * vec4<f32>(in.position, 1.0);
    out.world_pos = world_pos.xyz;
    // NOTE(review): the normal is transformed by the model matrix directly,
    // which is only correct under uniform scale; a normal (inverse-transpose)
    // matrix would be needed otherwise — confirm scales are uniform.
    out.world_normal = normalize((camera.model * vec4<f32>(in.normal, 0.0)).xyz);
    out.clip_position = camera.view_proj * world_pos;
    out.uv = in.uv;
    return out;
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let tex_color = textureSample(t_diffuse, s_diffuse, in.uv);
    // Re-normalize: interpolation denormalizes the vertex normal.
    let normal = normalize(in.world_normal);
    let light_dir = normalize(-light.direction);

    // Diffuse (Lambert)
    let ndotl = max(dot(normal, light_dir), 0.0);
    let diffuse = light.color * ndotl;

    // Specular (Blinn-Phong, fixed shininess 32, half strength)
    let view_dir = normalize(camera.camera_pos - in.world_pos);
    let half_dir = normalize(light_dir + view_dir);
    let spec = pow(max(dot(normal, half_dir), 0.0), 32.0);
    let specular = light.color * spec * 0.5;

    // Ambient
    let ambient = light.color * light.ambient_strength;

    let lit = (ambient + diffuse + specular) * tex_color.rgb;
    return vec4<f32>(lit, camera.alpha);
}
|
||||
@@ -4,6 +4,9 @@ use crate::obj::compute_tangents;
|
||||
|
||||
pub struct GltfData {
|
||||
pub meshes: Vec<GltfMesh>,
|
||||
pub nodes: Vec<GltfNode>,
|
||||
pub skins: Vec<GltfSkin>,
|
||||
pub animations: Vec<GltfAnimation>,
|
||||
}
|
||||
|
||||
pub struct GltfMesh {
|
||||
@@ -11,6 +14,8 @@ pub struct GltfMesh {
|
||||
pub indices: Vec<u32>,
|
||||
pub name: Option<String>,
|
||||
pub material: Option<GltfMaterial>,
|
||||
pub joints: Option<Vec<[u16; 4]>>,
|
||||
pub weights: Option<Vec<[f32; 4]>>,
|
||||
}
|
||||
|
||||
pub struct GltfMaterial {
|
||||
@@ -19,6 +24,72 @@ pub struct GltfMaterial {
|
||||
pub roughness: f32,
|
||||
}
|
||||
|
||||
/// One node of the glTF scene graph: a TRS transform plus optional mesh
/// and skin attachments. `children` are indices into the node array.
#[derive(Debug, Clone)]
pub struct GltfNode {
    pub name: Option<String>,
    pub children: Vec<usize>,
    pub translation: [f32; 3],
    pub rotation: [f32; 4], // quaternion [x,y,z,w]
    pub scale: [f32; 3],
    pub mesh: Option<usize>,
    pub skin: Option<usize>,
}
|
||||
|
||||
/// A glTF skin: joint node indices plus one inverse bind matrix per joint
/// (column-major, as read by `read_accessor_mat4`).
#[derive(Debug, Clone)]
pub struct GltfSkin {
    pub name: Option<String>,
    pub joints: Vec<usize>,
    pub inverse_bind_matrices: Vec<[[f32; 4]; 4]>,
    // Optional root node of the joint hierarchy.
    pub skeleton: Option<usize>,
}
|
||||
|
||||
/// A named glTF animation: a collection of channels, each driving one
/// property of one node.
#[derive(Debug, Clone)]
pub struct GltfAnimation {
    pub name: Option<String>,
    pub channels: Vec<GltfChannel>,
}
|
||||
|
||||
/// One animation channel: keyframe `times` paired with a flattened scalar
/// stream in `values` (per-key component count depends on `target_path`).
#[derive(Debug, Clone)]
pub struct GltfChannel {
    pub target_node: usize,
    pub target_path: AnimationPath,
    pub interpolation: Interpolation,
    pub times: Vec<f32>,
    pub values: Vec<f32>,
}
|
||||
|
||||
/// Which node property an animation channel drives.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AnimationPath {
    Translation,
    Rotation,
    Scale,
}

/// Keyframe interpolation modes defined by the glTF 2.0 specification.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Interpolation {
    Linear,
    Step,
    CubicSpline,
}

/// Map a glTF channel `target.path` string onto an [`AnimationPath`].
/// Unrecognized strings fall back to `Translation` (lenient parsing).
pub fn parse_animation_path(s: &str) -> AnimationPath {
    if s == "rotation" {
        AnimationPath::Rotation
    } else if s == "scale" {
        AnimationPath::Scale
    } else {
        // "translation" or anything unknown.
        AnimationPath::Translation
    }
}

/// Map a glTF sampler `interpolation` string onto an [`Interpolation`].
/// Unrecognized strings fall back to `Linear`.
pub fn parse_interpolation(s: &str) -> Interpolation {
    if s == "STEP" {
        Interpolation::Step
    } else if s == "CUBICSPLINE" {
        Interpolation::CubicSpline
    } else {
        // "LINEAR" or anything unknown.
        Interpolation::Linear
    }
}
|
||||
|
||||
const GLB_MAGIC: u32 = 0x46546C67;
|
||||
const GLB_VERSION: u32 = 2;
|
||||
const CHUNK_JSON: u32 = 0x4E4F534A;
|
||||
@@ -213,6 +284,20 @@ fn extract_meshes(json: &JsonValue, buffers: &[Vec<u8>]) -> Result<GltfData, Str
|
||||
});
|
||||
}
|
||||
|
||||
// Read JOINTS_0 (optional)
|
||||
let joints = if let Some(idx) = attrs.iter().find(|(k, _)| k == "JOINTS_0").and_then(|(_, v)| v.as_u32()) {
|
||||
Some(read_accessor_joints(accessors, buffer_views, buffers, idx as usize)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Read WEIGHTS_0 (optional)
|
||||
let weights = if let Some(idx) = attrs.iter().find(|(k, _)| k == "WEIGHTS_0").and_then(|(_, v)| v.as_u32()) {
|
||||
Some(read_accessor_vec4(accessors, buffer_views, buffers, idx as usize)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Compute tangents if not provided
|
||||
if tangents.is_none() {
|
||||
compute_tangents(&mut vertices, &indices);
|
||||
@@ -224,11 +309,15 @@ fn extract_meshes(json: &JsonValue, buffers: &[Vec<u8>]) -> Result<GltfData, Str
|
||||
.and_then(|idx| materials_json?.get(idx as usize))
|
||||
.and_then(|mat| extract_material(mat));
|
||||
|
||||
meshes.push(GltfMesh { vertices, indices, name: name.clone(), material });
|
||||
meshes.push(GltfMesh { vertices, indices, name: name.clone(), material, joints, weights });
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GltfData { meshes })
|
||||
let nodes = parse_nodes(json);
|
||||
let skins = parse_skins(json, accessors, buffer_views, buffers);
|
||||
let animations = parse_animations(json, accessors, buffer_views, buffers);
|
||||
|
||||
Ok(GltfData { meshes, nodes, skins, animations })
|
||||
}
|
||||
|
||||
fn get_buffer_data<'a>(
|
||||
@@ -336,6 +425,219 @@ fn read_accessor_indices(
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Read a JOINTS_0 accessor as four joint indices per vertex.
///
/// glTF stores joints as UNSIGNED_BYTE (5121) or UNSIGNED_SHORT (5123)
/// VEC4s; both widths are widened to `u16` here. Assumes tightly packed
/// data — byteStride is not handled; TODO confirm against the exporters
/// in use.
fn read_accessor_joints(
    accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
) -> Result<Vec<[u16; 4]>, String> {
    let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
    let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
    // Defaults to 5123 (UNSIGNED_SHORT) when componentType is absent.
    let comp_type = acc.get("componentType").and_then(|v| v.as_u32()).unwrap_or(5123);
    let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
    let mut result = Vec::with_capacity(count);
    match comp_type {
        5121 => { // UNSIGNED_BYTE — 4 bytes per vertex
            for i in 0..count {
                let o = offset + i * 4;
                if o + 4 > buffer.len() { return Err("Buffer overflow reading joints u8".into()); }
                result.push([
                    buffer[o] as u16, buffer[o+1] as u16,
                    buffer[o+2] as u16, buffer[o+3] as u16,
                ]);
            }
        }
        5123 => { // UNSIGNED_SHORT — 8 bytes per vertex, little-endian
            for i in 0..count {
                let o = offset + i * 8;
                if o + 8 > buffer.len() { return Err("Buffer overflow reading joints u16".into()); }
                result.push([
                    u16::from_le_bytes([buffer[o], buffer[o+1]]),
                    u16::from_le_bytes([buffer[o+2], buffer[o+3]]),
                    u16::from_le_bytes([buffer[o+4], buffer[o+5]]),
                    u16::from_le_bytes([buffer[o+6], buffer[o+7]]),
                ]);
            }
        }
        _ => return Err(format!("Unsupported joints component type: {}", comp_type)),
    }
    Ok(result)
}
|
||||
|
||||
fn read_accessor_mat4(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<[[f32; 4]; 4]>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(count);
|
||||
for i in 0..count {
|
||||
let o = offset + i * 64;
|
||||
if o + 64 > buffer.len() { return Err("Buffer overflow reading mat4".into()); }
|
||||
let mut mat = [[0.0f32; 4]; 4];
|
||||
for col in 0..4 {
|
||||
for row in 0..4 {
|
||||
let b = o + (col * 4 + row) * 4;
|
||||
mat[col][row] = f32::from_le_bytes([buffer[b], buffer[b+1], buffer[b+2], buffer[b+3]]);
|
||||
}
|
||||
}
|
||||
result.push(mat);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn read_accessor_floats(
|
||||
accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>], idx: usize,
|
||||
) -> Result<Vec<f32>, String> {
|
||||
let acc = accessors.get(idx).ok_or("Accessor index out of range")?;
|
||||
let count = acc.get("count").and_then(|v| v.as_u32()).ok_or("Missing count")? as usize;
|
||||
let acc_type = acc.get("type").and_then(|v| v.as_str()).unwrap_or("SCALAR");
|
||||
let components = match acc_type {
|
||||
"SCALAR" => 1,
|
||||
"VEC2" => 2,
|
||||
"VEC3" => 3,
|
||||
"VEC4" => 4,
|
||||
_ => 1,
|
||||
};
|
||||
let total = count * components;
|
||||
let (buffer, offset) = get_buffer_data(acc, buffer_views, buffers)?;
|
||||
let mut result = Vec::with_capacity(total);
|
||||
for i in 0..total {
|
||||
let o = offset + i * 4;
|
||||
if o + 4 > buffer.len() { return Err("Buffer overflow reading floats".into()); }
|
||||
result.push(f32::from_le_bytes([buffer[o], buffer[o+1], buffer[o+2], buffer[o+3]]));
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn parse_nodes(json: &JsonValue) -> Vec<GltfNode> {
|
||||
let empty_arr: Vec<JsonValue> = Vec::new();
|
||||
let nodes_arr = json.get("nodes").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
|
||||
let mut nodes = Vec::with_capacity(nodes_arr.len());
|
||||
for node_val in nodes_arr {
|
||||
let name = node_val.get("name").and_then(|v| v.as_str()).map(|s| s.to_string());
|
||||
|
||||
let children = node_val.get("children").and_then(|v| v.as_array())
|
||||
.map(|arr| arr.iter().filter_map(|v| v.as_u32().map(|n| n as usize)).collect())
|
||||
.unwrap_or_default();
|
||||
|
||||
let translation = node_val.get("translation").and_then(|v| v.as_array())
|
||||
.map(|arr| [
|
||||
arr.get(0).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
arr.get(1).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
arr.get(2).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
])
|
||||
.unwrap_or([0.0, 0.0, 0.0]);
|
||||
|
||||
let rotation = node_val.get("rotation").and_then(|v| v.as_array())
|
||||
.map(|arr| [
|
||||
arr.get(0).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
arr.get(1).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
arr.get(2).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
|
||||
arr.get(3).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
])
|
||||
.unwrap_or([0.0, 0.0, 0.0, 1.0]);
|
||||
|
||||
let scale = node_val.get("scale").and_then(|v| v.as_array())
|
||||
.map(|arr| [
|
||||
arr.get(0).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
arr.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
|
||||
])
|
||||
.unwrap_or([1.0, 1.0, 1.0]);
|
||||
|
||||
let mesh = node_val.get("mesh").and_then(|v| v.as_u32()).map(|n| n as usize);
|
||||
let skin = node_val.get("skin").and_then(|v| v.as_u32()).map(|n| n as usize);
|
||||
|
||||
nodes.push(GltfNode { name, children, translation, rotation, scale, mesh, skin });
|
||||
}
|
||||
nodes
|
||||
}
|
||||
|
||||
fn parse_skins(
|
||||
json: &JsonValue, accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>],
|
||||
) -> Vec<GltfSkin> {
|
||||
let empty_arr: Vec<JsonValue> = Vec::new();
|
||||
let skins_arr = json.get("skins").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
|
||||
let mut skins = Vec::with_capacity(skins_arr.len());
|
||||
for skin_val in skins_arr {
|
||||
let name = skin_val.get("name").and_then(|v| v.as_str()).map(|s| s.to_string());
|
||||
|
||||
let joints = skin_val.get("joints").and_then(|v| v.as_array())
|
||||
.map(|arr| arr.iter().filter_map(|v| v.as_u32().map(|n| n as usize)).collect())
|
||||
.unwrap_or_default();
|
||||
|
||||
let skeleton = skin_val.get("skeleton").and_then(|v| v.as_u32()).map(|n| n as usize);
|
||||
|
||||
let inverse_bind_matrices = skin_val.get("inverseBindMatrices")
|
||||
.and_then(|v| v.as_u32())
|
||||
.and_then(|idx| read_accessor_mat4(accessors, buffer_views, buffers, idx as usize).ok())
|
||||
.unwrap_or_default();
|
||||
|
||||
skins.push(GltfSkin { name, joints, inverse_bind_matrices, skeleton });
|
||||
}
|
||||
skins
|
||||
}
|
||||
|
||||
/// Parses the glTF "animations" array into `GltfAnimation`s.
///
/// glTF splits an animation into samplers (keyframe time/value curves)
/// and channels (which node property a sampler drives). Samplers are
/// decoded first; each channel then copies its sampler's data so every
/// `GltfChannel` is self-contained.
fn parse_animations(
    json: &JsonValue, accessors: &[JsonValue], buffer_views: &[JsonValue], buffers: &[Vec<u8>],
) -> Vec<GltfAnimation> {
    let empty_arr: Vec<JsonValue> = Vec::new();
    let anims_arr = json.get("animations").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
    let mut animations = Vec::with_capacity(anims_arr.len());
    for anim_val in anims_arr {
        let name = anim_val.get("name").and_then(|v| v.as_str()).map(|s| s.to_string());

        let samplers_arr = anim_val.get("samplers").and_then(|v| v.as_array()).unwrap_or(&empty_arr);
        let channels_arr = anim_val.get("channels").and_then(|v| v.as_array()).unwrap_or(&empty_arr);

        // Parse samplers: each has input, output, interpolation.
        // Local holder; only the fields channels copy out are kept.
        struct Sampler {
            times: Vec<f32>,  // keyframe timestamps
            values: Vec<f32>, // flat keyframe values; layout depends on the target path
            interpolation: Interpolation,
        }
        let mut samplers = Vec::with_capacity(samplers_arr.len());
        for s in samplers_arr {
            // Interpolation defaults to LINEAR when unspecified.
            let interp_str = s.get("interpolation").and_then(|v| v.as_str()).unwrap_or("LINEAR");
            let interpolation = parse_interpolation(interp_str);

            // Missing or unreadable accessors degrade to empty curves
            // rather than failing the whole animation.
            let times = s.get("input").and_then(|v| v.as_u32())
                .and_then(|idx| read_accessor_floats(accessors, buffer_views, buffers, idx as usize).ok())
                .unwrap_or_default();

            let values = s.get("output").and_then(|v| v.as_u32())
                .and_then(|idx| read_accessor_floats(accessors, buffer_views, buffers, idx as usize).ok())
                .unwrap_or_default();

            samplers.push(Sampler { times, values, interpolation });
        }

        // Parse channels
        let mut channels = Vec::with_capacity(channels_arr.len());
        for ch in channels_arr {
            let sampler_idx = ch.get("sampler").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
            // A channel without a target cannot drive anything; skip it.
            let target = match ch.get("target") {
                Some(t) => t,
                None => continue,
            };
            let target_node = target.get("node").and_then(|v| v.as_u32()).unwrap_or(0) as usize;
            let path_str = target.get("path").and_then(|v| v.as_str()).unwrap_or("translation");
            let target_path = parse_animation_path(path_str);

            // Channels referencing an out-of-range sampler are dropped.
            if let Some(sampler) = samplers.get(sampler_idx) {
                channels.push(GltfChannel {
                    target_node,
                    target_path,
                    interpolation: sampler.interpolation,
                    times: sampler.times.clone(),
                    values: sampler.values.clone(),
                });
            }
        }

        animations.push(GltfAnimation { name, channels });
    }
    animations
}
|
||||
|
||||
fn extract_material(mat: &JsonValue) -> Option<GltfMaterial> {
|
||||
let pbr = mat.get("pbrMetallicRoughness")?;
|
||||
let base_color = if let Some(arr) = pbr.get("baseColorFactor").and_then(|v| v.as_array()) {
|
||||
@@ -513,6 +815,106 @@ mod tests {
|
||||
glb
|
||||
}
|
||||
|
||||
#[test]
fn test_animation_path_parsing() {
    // Each glTF target-path string maps to its AnimationPath variant.
    assert_eq!(parse_animation_path("translation"), AnimationPath::Translation);
    assert_eq!(parse_animation_path("rotation"), AnimationPath::Rotation);
    assert_eq!(parse_animation_path("scale"), AnimationPath::Scale);
}
|
||||
|
||||
#[test]
fn test_interpolation_parsing() {
    // The three interpolation modes defined by glTF 2.0 animation samplers.
    assert_eq!(parse_interpolation("LINEAR"), Interpolation::Linear);
    assert_eq!(parse_interpolation("STEP"), Interpolation::Step);
    assert_eq!(parse_interpolation("CUBICSPLINE"), Interpolation::CubicSpline);
}
|
||||
|
||||
#[test]
fn test_gltf_data_has_new_fields() {
    // A minimal GLB (one triangle, no scene graph) must still parse, with
    // the skeletal-animation fields empty rather than erroring.
    let glb = build_minimal_glb_triangle();
    let data = parse_gltf(&glb).unwrap();
    assert_eq!(data.meshes.len(), 1);
    // No nodes/skins/animations in minimal GLB — should be empty, not crash
    assert!(data.nodes.is_empty());
    assert!(data.skins.is_empty());
    assert!(data.animations.is_empty());
    // Joints/weights should be None
    assert!(data.meshes[0].joints.is_none());
    assert!(data.meshes[0].weights.is_none());
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_glb_with_node() {
|
||||
let glb = build_glb_with_node();
|
||||
let data = parse_gltf(&glb).unwrap();
|
||||
assert_eq!(data.meshes.len(), 1);
|
||||
assert_eq!(data.nodes.len(), 1);
|
||||
let node = &data.nodes[0];
|
||||
assert_eq!(node.name.as_deref(), Some("RootNode"));
|
||||
assert_eq!(node.mesh, Some(0));
|
||||
assert!((node.translation[0] - 1.0).abs() < 0.001);
|
||||
assert!((node.translation[1] - 2.0).abs() < 0.001);
|
||||
assert!((node.translation[2] - 3.0).abs() < 0.001);
|
||||
assert_eq!(node.scale, [1.0, 1.0, 1.0]);
|
||||
}
|
||||
|
||||
/// Builds a GLB containing one triangle mesh plus a single node
/// ("RootNode") that references it with translation [1, 2, 3].
/// Exercises node parsing through the full GLB path.
fn build_glb_with_node() -> Vec<u8> {
    // Binary chunk: 3 vec3 positions (36 bytes) ...
    let mut bin = Vec::new();
    for &v in &[0.0f32, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0] {
        bin.extend_from_slice(&v.to_le_bytes());
    }
    // ... followed by 3 u16 indices (6 bytes).
    for &i in &[0u16, 1, 2] {
        bin.extend_from_slice(&i.to_le_bytes());
    }
    // Pad the binary chunk to 4-byte alignment as GLB requires.
    bin.extend_from_slice(&[0, 0]);

    let json_str = format!(r#"{{
        "asset": {{"version": "2.0"}},
        "buffers": [{{"byteLength": {}}}],
        "bufferViews": [
            {{"buffer": 0, "byteOffset": 0, "byteLength": 36}},
            {{"buffer": 0, "byteOffset": 36, "byteLength": 6}}
        ],
        "accessors": [
            {{"bufferView": 0, "componentType": 5126, "count": 3, "type": "VEC3",
              "max": [1.0, 1.0, 0.0], "min": [0.0, 0.0, 0.0]}},
            {{"bufferView": 1, "componentType": 5123, "count": 3, "type": "SCALAR"}}
        ],
        "nodes": [{{
            "name": "RootNode",
            "mesh": 0,
            "translation": [1.0, 2.0, 3.0]
        }}],
        "meshes": [{{
            "name": "Triangle",
            "primitives": [{{
                "attributes": {{"POSITION": 0}},
                "indices": 1
            }}]
        }}]
    }}"#, bin.len());

    let json_bytes = json_str.as_bytes();
    // The JSON chunk must be padded to 4 bytes; spaces are the required filler.
    let json_padded_len = (json_bytes.len() + 3) & !3;
    let mut json_padded = json_bytes.to_vec();
    while json_padded.len() < json_padded_len {
        json_padded.push(b' ');
    }

    // GLB layout: 12-byte header, then (length, type, payload) chunks.
    let total_len = 12 + 8 + json_padded.len() + 8 + bin.len();
    let mut glb = Vec::with_capacity(total_len);
    glb.extend_from_slice(&0x46546C67u32.to_le_bytes()); // magic "glTF"
    glb.extend_from_slice(&2u32.to_le_bytes());          // container version 2
    glb.extend_from_slice(&(total_len as u32).to_le_bytes());
    glb.extend_from_slice(&(json_padded.len() as u32).to_le_bytes());
    glb.extend_from_slice(&0x4E4F534Au32.to_le_bytes()); // chunk type "JSON"
    glb.extend_from_slice(&json_padded);
    glb.extend_from_slice(&(bin.len() as u32).to_le_bytes());
    glb.extend_from_slice(&0x004E4942u32.to_le_bytes()); // chunk type "BIN\0"
    glb.extend_from_slice(&bin);
    glb
}
|
||||
|
||||
/// Build a GLB with one triangle and a material.
|
||||
fn build_glb_with_material() -> Vec<u8> {
|
||||
let mut bin = Vec::new();
|
||||
|
||||
81
crates/voltex_renderer/src/instanced_shader.wgsl
Normal file
81
crates/voltex_renderer/src/instanced_shader.wgsl
Normal file
@@ -0,0 +1,81 @@
|
||||
// Forward shader for instanced meshes: the model matrix and tint color
// arrive per instance through vertex attributes instead of a uniform.

struct CameraUniform {
    view_proj: mat4x4<f32>,   // combined view * projection
    model: mat4x4<f32>,       // unused here; instances carry their own model
    camera_pos: vec3<f32>,
    _pad: f32,
};

struct LightUniform {
    direction: vec3<f32>,     // direction light travels (negated for shading)
    _pad0: f32,
    color: vec3<f32>,
    ambient_strength: f32,
};

@group(0) @binding(0) var<uniform> camera: CameraUniform;
@group(0) @binding(1) var<uniform> light: LightUniform;

@group(1) @binding(0) var t_diffuse: texture_2d<f32>;
@group(1) @binding(1) var s_diffuse: sampler;

// Per-vertex mesh attributes (locations 0-3).
struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) uv: vec2<f32>,
    @location(3) tangent: vec4<f32>,  // declared for layout compatibility; not read here
};

// Per-instance attributes (locations 4-8): the model matrix split into
// four columns plus a tint color. Must match InstanceData::LAYOUT.
struct InstanceInput {
    @location(4) model_0: vec4<f32>,
    @location(5) model_1: vec4<f32>,
    @location(6) model_2: vec4<f32>,
    @location(7) model_3: vec4<f32>,
    @location(8) color: vec4<f32>,
};

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) world_normal: vec3<f32>,
    @location(1) world_pos: vec3<f32>,
    @location(2) uv: vec2<f32>,
    @location(3) inst_color: vec4<f32>,
};

@vertex
fn vs_main(vertex: VertexInput, instance: InstanceInput) -> VertexOutput {
    // Reassemble the per-instance model matrix from its four columns.
    let inst_model = mat4x4<f32>(
        instance.model_0,
        instance.model_1,
        instance.model_2,
        instance.model_3,
    );

    var out: VertexOutput;
    let world_pos = inst_model * vec4<f32>(vertex.position, 1.0);
    out.world_pos = world_pos.xyz;
    // NOTE(review): normals are transformed by the model matrix itself,
    // not its inverse transpose — exact only under uniform scaling;
    // confirm instances never carry non-uniform scale.
    out.world_normal = normalize((inst_model * vec4<f32>(vertex.normal, 0.0)).xyz);
    out.clip_position = camera.view_proj * world_pos;
    out.uv = vertex.uv;
    out.inst_color = instance.color;
    return out;
}

// Blinn-Phong shading: ambient + diffuse + specular, modulated by the
// diffuse texture and the per-instance tint.
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let tex_color = textureSample(t_diffuse, s_diffuse, in.uv);
    let normal = normalize(in.world_normal);
    let light_dir = normalize(-light.direction);

    let ndotl = max(dot(normal, light_dir), 0.0);
    let diffuse = light.color * ndotl;

    // Half-vector specular with fixed shininess 32 and 0.3 intensity.
    let view_dir = normalize(camera.camera_pos - in.world_pos);
    let half_dir = normalize(light_dir + view_dir);
    let spec = pow(max(dot(normal, half_dir), 0.0), 32.0);
    let specular = light.color * spec * 0.3;

    let ambient = light.color * light.ambient_strength;

    let lit = (ambient + diffuse + specular) * tex_color.rgb * in.inst_color.rgb;
    return vec4<f32>(lit, tex_color.a * in.inst_color.a);
}
|
||||
164
crates/voltex_renderer/src/instancing.rs
Normal file
164
crates/voltex_renderer/src/instancing.rs
Normal file
@@ -0,0 +1,164 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
use crate::vertex::MeshVertex;
|
||||
use crate::gpu::DEPTH_FORMAT;
|
||||
|
||||
/// Per-instance GPU data: a model matrix plus a tint color.
/// `repr(C)` + `Pod`/`Zeroable` allow direct upload with bytemuck.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct InstanceData {
    pub model: [[f32; 4]; 4], // 64 bytes
    pub color: [f32; 4],      // 16 bytes
}

impl InstanceData {
    /// Vertex-buffer layout that advances once per instance. The matrix is
    /// split into four vec4 attributes (shader locations 4-7, reassembled
    /// in instanced_shader.wgsl); color rides at location 8, after the
    /// mesh-vertex attributes at locations 0-3.
    pub const LAYOUT: wgpu::VertexBufferLayout<'static> = wgpu::VertexBufferLayout {
        array_stride: std::mem::size_of::<InstanceData>() as u64,
        step_mode: wgpu::VertexStepMode::Instance,
        attributes: &[
            wgpu::VertexAttribute { offset: 0, shader_location: 4, format: wgpu::VertexFormat::Float32x4 },
            wgpu::VertexAttribute { offset: 16, shader_location: 5, format: wgpu::VertexFormat::Float32x4 },
            wgpu::VertexAttribute { offset: 32, shader_location: 6, format: wgpu::VertexFormat::Float32x4 },
            wgpu::VertexAttribute { offset: 48, shader_location: 7, format: wgpu::VertexFormat::Float32x4 },
            wgpu::VertexAttribute { offset: 64, shader_location: 8, format: wgpu::VertexFormat::Float32x4 },
        ],
    };
}
|
||||
|
||||
/// GPU vertex buffer holding `InstanceData`, growing on demand.
pub struct InstanceBuffer {
    pub buffer: wgpu::Buffer, // backing VERTEX | COPY_DST buffer
    pub capacity: usize,      // allocated size, in instances
    pub count: usize,         // instances written by the last `update`
}

impl InstanceBuffer {
    /// Allocates a buffer sized for `capacity` instances; no data is
    /// written until `update` is called.
    pub fn new(device: &wgpu::Device, capacity: usize) -> Self {
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Instance Buffer"),
            size: (capacity * std::mem::size_of::<InstanceData>()) as u64,
            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        InstanceBuffer { buffer, capacity, count: 0 }
    }

    /// Uploads `instances`, reallocating to the next power of two when the
    /// current capacity is too small. An empty slice only resets `count`;
    /// nothing is uploaded and the old buffer contents are left as-is.
    pub fn update(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, instances: &[InstanceData]) {
        self.count = instances.len();
        if instances.is_empty() { return; }

        if instances.len() > self.capacity {
            // Power-of-two growth amortizes reallocation cost; old buffer
            // contents are not copied because the full set is written below.
            self.capacity = instances.len().next_power_of_two();
            self.buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("Instance Buffer"),
                size: (self.capacity * std::mem::size_of::<InstanceData>()) as u64,
                usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
                mapped_at_creation: false,
            });
        }

        queue.write_buffer(&self.buffer, 0, bytemuck::cast_slice(instances));
    }
}
|
||||
|
||||
/// Builds the instanced-mesh render pipeline: per-vertex mesh attributes
/// (locations 0-3, `MeshVertex::LAYOUT`) plus per-instance model matrix
/// and color (locations 4-8, `InstanceData::LAYOUT`), shaded by
/// instanced_shader.wgsl.
///
/// Opaque-geometry settings: back-face culling, depth test/write against
/// `DEPTH_FORMAT`, replace blending, single-sample.
pub fn create_instanced_pipeline(
    device: &wgpu::Device,
    format: wgpu::TextureFormat,
    camera_light_layout: &wgpu::BindGroupLayout,
    texture_layout: &wgpu::BindGroupLayout,
) -> wgpu::RenderPipeline {
    let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("Instanced Shader"),
        source: wgpu::ShaderSource::Wgsl(include_str!("instanced_shader.wgsl").into()),
    });

    // Group 0: camera + light uniforms; group 1: diffuse texture + sampler.
    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: Some("Instanced Pipeline Layout"),
        bind_group_layouts: &[camera_light_layout, texture_layout],
        immediate_size: 0,
    });

    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Instanced Pipeline"),
        layout: Some(&layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: Some("vs_main"),
            // Slot 0 steps per vertex, slot 1 per instance.
            buffers: &[MeshVertex::LAYOUT, InstanceData::LAYOUT],
            compilation_options: wgpu::PipelineCompilationOptions::default(),
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: Some("fs_main"),
            targets: &[Some(wgpu::ColorTargetState {
                format,
                blend: Some(wgpu::BlendState::REPLACE),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            compilation_options: wgpu::PipelineCompilationOptions::default(),
        }),
        primitive: wgpu::PrimitiveState {
            topology: wgpu::PrimitiveTopology::TriangleList,
            strip_index_format: None,
            front_face: wgpu::FrontFace::Ccw,
            cull_mode: Some(wgpu::Face::Back),
            polygon_mode: wgpu::PolygonMode::Fill,
            unclipped_depth: false,
            conservative: false,
        },
        depth_stencil: Some(wgpu::DepthStencilState {
            format: DEPTH_FORMAT,
            depth_write_enabled: true,
            depth_compare: wgpu::CompareFunction::Less,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        multisample: wgpu::MultisampleState { count: 1, mask: !0, alpha_to_coverage_enabled: false },
        multiview_mask: None,
        cache: None,
    })
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // InstanceData must stay exactly 64 (matrix) + 16 (color) bytes so the
    // vertex-layout offsets asserted below remain valid.
    #[test]
    fn test_instance_data_size() {
        assert_eq!(std::mem::size_of::<InstanceData>(), 80);
    }

    #[test]
    fn test_instance_data_layout_attributes() {
        assert_eq!(InstanceData::LAYOUT.attributes.len(), 5);
        assert_eq!(InstanceData::LAYOUT.array_stride, 80);
        assert_eq!(InstanceData::LAYOUT.step_mode, wgpu::VertexStepMode::Instance);
    }

    // Locations 4-8 must match InstanceInput in instanced_shader.wgsl.
    #[test]
    fn test_instance_data_layout_locations() {
        let attrs = InstanceData::LAYOUT.attributes;
        assert_eq!(attrs[0].shader_location, 4);
        assert_eq!(attrs[1].shader_location, 5);
        assert_eq!(attrs[2].shader_location, 6);
        assert_eq!(attrs[3].shader_location, 7);
        assert_eq!(attrs[4].shader_location, 8);
    }

    // Offsets follow the repr(C) field order: four matrix columns then color.
    #[test]
    fn test_instance_data_layout_offsets() {
        let attrs = InstanceData::LAYOUT.attributes;
        assert_eq!(attrs[0].offset, 0);
        assert_eq!(attrs[1].offset, 16);
        assert_eq!(attrs[2].offset, 32);
        assert_eq!(attrs[3].offset, 48);
        assert_eq!(attrs[4].offset, 64);
    }

    // bytemuck must view the struct as exactly its 80 declared bytes
    // (no hidden padding).
    #[test]
    fn test_instance_data_pod() {
        let data = InstanceData {
            model: [[1.0,0.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,0.0,1.0,0.0],[0.0,0.0,0.0,1.0]],
            color: [1.0, 1.0, 1.0, 1.0],
        };
        let bytes: &[u8] = bytemuck::bytes_of(&data);
        assert_eq!(bytes.len(), 80);
    }
}
|
||||
@@ -3,6 +3,7 @@ pub mod gltf;
|
||||
pub mod deflate;
|
||||
pub mod png;
|
||||
pub mod jpg;
|
||||
pub mod progressive_jpeg;
|
||||
pub mod gpu;
|
||||
pub mod light;
|
||||
pub mod obj;
|
||||
@@ -29,13 +30,28 @@ pub mod deferred_pipeline;
|
||||
pub mod ssgi;
|
||||
pub mod rt_accel;
|
||||
pub mod rt_shadow;
|
||||
pub mod rt_reflections;
|
||||
pub mod rt_ao;
|
||||
pub mod rt_point_shadow;
|
||||
pub mod hdr;
|
||||
pub mod bloom;
|
||||
pub mod tonemap;
|
||||
pub mod forward_pass;
|
||||
pub mod auto_exposure;
|
||||
pub mod instancing;
|
||||
pub mod bilateral_blur;
|
||||
pub mod temporal_accum;
|
||||
pub mod taa;
|
||||
pub mod ssr;
|
||||
pub mod motion_blur;
|
||||
pub mod dof;
|
||||
|
||||
pub use motion_blur::MotionBlur;
|
||||
pub use dof::DepthOfField;
|
||||
pub use gpu::{GpuContext, DEPTH_FORMAT};
|
||||
pub use light::{CameraUniform, LightUniform, LightData, LightsUniform, MAX_LIGHTS, LIGHT_DIRECTIONAL, LIGHT_POINT, LIGHT_SPOT};
|
||||
pub use mesh::Mesh;
|
||||
pub use vertex::{Vertex, MeshVertex};
|
||||
pub use camera::{Camera, FpsController};
|
||||
pub use texture::{GpuTexture, pbr_texture_bind_group_layout, create_pbr_texture_bind_group, pbr_full_texture_bind_group_layout, create_pbr_full_texture_bind_group};
|
||||
pub use material::MaterialUniform;
|
||||
@@ -63,9 +79,19 @@ pub use deferred_pipeline::{
|
||||
pub use ssgi::{SsgiResources, SsgiUniform, SSGI_OUTPUT_FORMAT};
|
||||
pub use rt_accel::{RtAccel, RtInstance, BlasMeshData, mat4_to_tlas_transform};
|
||||
pub use rt_shadow::{RtShadowResources, RtShadowUniform, RT_SHADOW_FORMAT};
|
||||
pub use rt_reflections::RtReflections;
|
||||
pub use rt_ao::RtAo;
|
||||
pub use rt_point_shadow::RtPointShadow;
|
||||
pub use hdr::{HdrTarget, HDR_FORMAT};
|
||||
pub use bloom::{BloomResources, BloomUniform, mip_sizes, BLOOM_MIP_COUNT};
|
||||
pub use tonemap::{TonemapUniform, aces_tonemap};
|
||||
pub use forward_pass::{ForwardPass, sort_transparent_back_to_front};
|
||||
pub use auto_exposure::AutoExposure;
|
||||
pub use instancing::{InstanceData, InstanceBuffer, create_instanced_pipeline};
|
||||
pub use bilateral_blur::BilateralBlur;
|
||||
pub use temporal_accum::TemporalAccumulation;
|
||||
pub use taa::Taa;
|
||||
pub use ssr::Ssr;
|
||||
pub use png::parse_png;
|
||||
pub use jpg::parse_jpg;
|
||||
pub use gltf::{parse_gltf, GltfData, GltfMesh, GltfMaterial};
|
||||
|
||||
177
crates/voltex_renderer/src/motion_blur.rs
Normal file
177
crates/voltex_renderer/src/motion_blur.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
/// Uniform data for the motion-blur compute pass.
/// The `repr(C)` layout mirrors `MotionBlurParams` in motion_blur.wgsl;
/// the trailing pad keeps the struct size a multiple of 16 bytes.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
pub struct MotionBlurParams {
    pub inv_view_proj: [[f32; 4]; 4],  // inverse of the current view-projection
    pub prev_view_proj: [[f32; 4]; 4], // previous frame's view-projection
    pub num_samples: u32,              // blur taps along the velocity vector
    pub strength: f32,                 // velocity scale factor
    pub _pad: [f32; 2],
}
|
||||
|
||||
impl MotionBlurParams {
|
||||
pub fn new() -> Self {
|
||||
MotionBlurParams {
|
||||
inv_view_proj: [[0.0; 4]; 4],
|
||||
prev_view_proj: [[0.0; 4]; 4],
|
||||
num_samples: 8,
|
||||
strength: 1.0,
|
||||
_pad: [0.0; 2],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Camera motion-blur post-process, implemented as a compute pass
/// (see motion_blur.wgsl).
pub struct MotionBlur {
    pipeline: wgpu::ComputePipeline,
    bind_group_layout: wgpu::BindGroupLayout,
    params_buffer: wgpu::Buffer, // uniform buffer for MotionBlurParams
    pub enabled: bool,           // caller-facing toggle; `dispatch` itself does not consult it
}

impl MotionBlur {
    /// Creates the compute pipeline, bind group layout and params buffer.
    ///
    /// Bindings (all COMPUTE visibility):
    ///   0: input color (non-filterable float texture)
    ///   1: scene depth (depth texture)
    ///   2: output (rgba16float write-only storage texture)
    ///   3: MotionBlurParams uniform buffer
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Motion Blur Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("motion_blur.wgsl").into()),
        });

        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Motion Blur BGL"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            // Non-filterable: the shader uses textureLoad, no sampler.
                            sample_type: wgpu::TextureSampleType::Float { filterable: false },
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            sample_type: wgpu::TextureSampleType::Depth,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::StorageTexture {
                            access: wgpu::StorageTextureAccess::WriteOnly,
                            format: wgpu::TextureFormat::Rgba16Float,
                            view_dimension: wgpu::TextureViewDimension::D2,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 3,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
            });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Motion Blur PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Motion Blur Pipeline"),
            layout: Some(&pipeline_layout),
            module: &shader,
            entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(),
            cache: None,
        });

        let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Motion Blur Params"),
            size: std::mem::size_of::<MotionBlurParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        MotionBlur {
            pipeline,
            bind_group_layout,
            params_buffer,
            enabled: true,
        }
    }

    /// Uploads `params` and dispatches the blur over a `width` x `height`
    /// target in 16x16 workgroups (rounded up). The bind group is rebuilt
    /// every call, so the provided views may change between frames.
    pub fn dispatch(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        color_view: &wgpu::TextureView,
        depth_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
        params: &MotionBlurParams,
        width: u32,
        height: u32,
    ) {
        queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Motion Blur BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(color_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(depth_view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::TextureView(output_view),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: self.params_buffer.as_entire_binding(),
                },
            ],
        });
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Motion Blur Pass"),
            timestamp_writes: None,
        });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        // Ceil-divide so partial edge tiles are still covered.
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Defaults from MotionBlurParams::new: 8 taps, unit strength.
    #[test]
    fn test_params_default() {
        let p = MotionBlurParams::new();
        assert_eq!(p.num_samples, 8);
        assert!((p.strength - 1.0).abs() < 1e-6);
    }

    // Uniform-buffer structs must have a size that is a multiple of 16.
    #[test]
    fn test_params_size_aligned() {
        let size = std::mem::size_of::<MotionBlurParams>();
        assert_eq!(size % 16, 0);
    }
}
|
||||
49
crates/voltex_renderer/src/motion_blur.wgsl
Normal file
49
crates/voltex_renderer/src/motion_blur.wgsl
Normal file
@@ -0,0 +1,49 @@
|
||||
// Camera motion blur: reconstruct each pixel's world position from depth,
// reproject it with the previous frame's view-projection to obtain a
// screen-space velocity, then average color samples along that velocity.

struct MotionBlurParams {
    inv_view_proj: mat4x4<f32>,   // inverse of the current view-projection
    prev_view_proj: mat4x4<f32>,  // previous frame's view-projection
    num_samples: u32,             // taps along the velocity vector
    strength: f32,                // velocity scale (0 = no blur)
    _pad: vec2<f32>,
};

@group(0) @binding(0) var color_tex: texture_2d<f32>;
@group(0) @binding(1) var depth_tex: texture_depth_2d;
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(3) var<uniform> params: MotionBlurParams;

@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(color_tex);
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }

    let pos = vec2<i32>(gid.xy);
    let uv = (vec2<f32>(gid.xy) + 0.5) / vec2<f32>(dims);

    // Fix: with num_samples < 2 the tap-position formula below divides by
    // zero (f32(n - 1u), and f32(n) for n == 0), writing NaNs to the
    // output; pass the color through unchanged instead.
    if (params.num_samples < 2u) {
        textureStore(output_tex, pos, textureLoad(color_tex, pos, 0));
        return;
    }

    // Reconstruct world position from depth
    let depth = textureLoad(depth_tex, pos, 0);
    let ndc = vec4<f32>(uv.x * 2.0 - 1.0, 1.0 - uv.y * 2.0, depth, 1.0);
    let world = params.inv_view_proj * ndc;
    let world_pos = world.xyz / world.w;

    // Project to previous frame
    let prev_clip = params.prev_view_proj * vec4<f32>(world_pos, 1.0);
    let prev_ndc = prev_clip.xyz / prev_clip.w;
    let prev_uv = vec2<f32>(prev_ndc.x * 0.5 + 0.5, 1.0 - (prev_ndc.y * 0.5 + 0.5));

    // Velocity = current_uv - prev_uv
    let velocity = (uv - prev_uv) * params.strength;

    // Box-filter along the velocity direction, centered on this pixel.
    var color = vec4<f32>(0.0);
    let n = params.num_samples;
    for (var i = 0u; i < n; i++) {
        let t = f32(i) / f32(n - 1u) - 0.5; // -0.5 to 0.5
        let sample_uv = uv + velocity * t;
        let sample_pos = vec2<i32>(sample_uv * vec2<f32>(dims));
        let clamped = clamp(sample_pos, vec2<i32>(0), vec2<i32>(dims) - 1);
        color += textureLoad(color_tex, clamped, 0);
    }
    color /= f32(n);

    textureStore(output_tex, pos, color);
}
|
||||
161
crates/voltex_renderer/src/progressive_jpeg.rs
Normal file
161
crates/voltex_renderer/src/progressive_jpeg.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
/// Progressive JPEG scan parameters, as decoded from an SOS marker segment.
///
/// See ITU-T T.81 Annex B (B.2.3): a scan header carries the spectral
/// selection range (Ss/Se), the successive-approximation bit positions
/// (Ah/Al), and the components coded in the scan.
#[derive(Debug, Clone)]
pub struct ScanParams {
    pub ss: u8, // spectral selection start
    pub se: u8, // spectral selection end
    pub ah: u8, // successive approximation high
    pub al: u8, // successive approximation low
    pub components: Vec<u8>, // component indices in this scan
}

/// Detect if a JPEG is progressive (SOF2 marker present).
pub fn is_progressive_jpeg(data: &[u8]) -> bool {
    // SOF2 (0xFF 0xC2) marks a progressive-DCT frame header.
    data.windows(2).any(|w| w == [0xFF, 0xC2])
}

/// Parse SOS markers to extract scan parameters.
///
/// Malformed or truncated SOS segments are skipped rather than panicking;
/// only scans whose header fits entirely inside `data` are returned.
pub fn parse_scan_params(data: &[u8]) -> Vec<ScanParams> {
    let mut scans = Vec::new();
    let mut i = 0;
    while i < data.len().saturating_sub(1) {
        if data[i] == 0xFF && data[i + 1] == 0xDA {
            // SOS marker found. The 2-byte big-endian segment length needs
            // index i + 3 in bounds — the previous `i + 2 < len` check
            // allowed an out-of-bounds read of data[i + 3].
            if i + 3 < data.len() {
                let len = ((data[i + 2] as usize) << 8) | data[i + 3] as usize;
                // The component-count byte at i + 4 must itself be in
                // bounds: a corrupt len < 3 can pass the segment-fits check
                // while leaving i + 4 past the end of the buffer.
                if i + 2 + len <= data.len() && i + 4 < data.len() {
                    let num_components = data[i + 4] as usize;
                    let param_offset = i + 4 + 1 + num_components * 2;
                    // param_offset + 2 in bounds also guarantees the
                    // (id, table) pairs below are in bounds.
                    if param_offset + 2 < data.len() {
                        let ss = data[param_offset];
                        let se = data[param_offset + 1];
                        let ah_al = data[param_offset + 2];
                        let ah = ah_al >> 4;
                        let al = ah_al & 0x0F;
                        let mut components = Vec::with_capacity(num_components);
                        for c in 0..num_components {
                            components.push(data[i + 5 + c * 2]);
                        }
                        scans.push(ScanParams { ss, se, ah, al, components });
                    }
                }
            }
            i += 3;
        } else {
            i += 1;
        }
    }
    scans
}

/// A 64-element DCT coefficient block.
pub type DctBlock = [i32; 64];

/// Accumulate a scan's coefficients into the coefficient buffer.
///
/// * First scan for a band (`ah == 0`): coefficients in `ss..=se` are
///   copied in, shifted left by `al` (the successive-approximation point
///   transform).
/// * Refinement scan (`ah > 0`): each scan value contributes one bit at
///   position `al`, widening the magnitude of already-significant
///   coefficients and creating newly-significant ones.
pub fn merge_scan_coefficients(
    block: &mut DctBlock,
    scan_block: &DctBlock,
    params: &ScanParams,
) {
    // Clamp the spectral band to the 64 coefficients of an 8x8 block so a
    // corrupt Se can never index out of range (empty range if ss > se).
    let lo = params.ss as usize;
    let hi = (params.se as usize).min(63);
    if params.ah == 0 {
        // First scan for this range: copy coefficients
        for i in lo..=hi {
            block[i] = scan_block[i] << params.al;
        }
    } else {
        // Refinement: add one bit of precision at bit position Al
        for i in lo..=hi {
            let bit = (scan_block[i] & 1) << params.al;
            if block[i] > 0 {
                block[i] |= bit; // positive: OR in the magnitude bit
            } else if block[i] < 0 {
                block[i] -= bit; // negative: extend magnitude away from zero
            } else if bit != 0 {
                block[i] = bit; // newly-significant coefficient
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// SOF2 (0xFF 0xC2) anywhere in the stream marks a progressive JPEG.
    #[test]
    fn test_is_progressive_sof2() {
        let bytes = [0xFF, 0xD8, 0xFF, 0xC2, 0x00, 0x0B];
        assert!(is_progressive_jpeg(&bytes));
    }

    /// SOF0 (baseline DCT) must not be classified as progressive.
    #[test]
    fn test_is_not_progressive_sof0() {
        let bytes = [0xFF, 0xD8, 0xFF, 0xC0, 0x00, 0x0B];
        assert!(!is_progressive_jpeg(&bytes));
    }

    /// A minimal SOS segment with Ss=0, Se=63, Ah=Al=0 parses as one scan.
    #[test]
    fn test_parse_baseline_scan() {
        let data: Vec<u8> = vec![
            0xFF, 0xD8, // SOI
            0xFF, 0xDA, // SOS marker
            0x00, 0x0C, // segment length = 12
            3,          // num_components
            1, 0x00, 2, 0x11, 3, 0x11, // (id, table) pairs
            0,  // Ss = 0
            63, // Se = 63
            0,  // Ah = 0, Al = 0
        ];
        let scans = parse_scan_params(&data);
        assert_eq!(scans.len(), 1);
        let scan = &scans[0];
        assert_eq!((scan.ss, scan.se, scan.ah, scan.al), (0, 63, 0, 0));
    }

    /// First DC scan: coefficient is stored shifted left by Al.
    #[test]
    fn test_merge_first_scan_dc() {
        let mut coeffs: DctBlock = [0; 64];
        let mut incoming: DctBlock = [0; 64];
        incoming[0] = 100; // DC coefficient from the scan
        let params = ScanParams { ss: 0, se: 0, ah: 0, al: 2, components: vec![1] };
        merge_scan_coefficients(&mut coeffs, &incoming, &params);
        assert_eq!(coeffs[0], 100 << 2); // point transform applied
    }

    /// First AC scan: only indices inside [Ss..=Se] are written.
    #[test]
    fn test_merge_ac_scan() {
        let mut coeffs: DctBlock = [0; 64];
        let mut incoming: DctBlock = [0; 64];
        incoming[1] = 50;
        incoming[5] = 30;
        let params = ScanParams { ss: 1, se: 5, ah: 0, al: 0, components: vec![1] };
        merge_scan_coefficients(&mut coeffs, &incoming, &params);
        assert_eq!(coeffs[1], 50);
        assert_eq!(coeffs[5], 30);
        assert_eq!(coeffs[0], 0); // DC not touched
        assert_eq!(coeffs[6], 0); // outside range
    }

    /// Degenerate inputs must be handled without panicking.
    #[test]
    fn test_empty_data() {
        assert!(!is_progressive_jpeg(&[]));
        assert!(parse_scan_params(&[]).is_empty());
    }
}
|
||||
141
crates/voltex_renderer/src/rt_ao.rs
Normal file
141
crates/voltex_renderer/src/rt_ao.rs
Normal file
@@ -0,0 +1,141 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
/// Uniform parameters for the RT ambient occlusion compute pass.
|
||||
///
|
||||
/// Layout (16 bytes, 16-byte aligned):
|
||||
/// num_samples: u32, radius: f32, bias: f32, intensity: f32 → 16 bytes
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
|
||||
pub struct RtAoParams {
|
||||
pub num_samples: u32,
|
||||
pub radius: f32,
|
||||
pub bias: f32,
|
||||
pub intensity: f32,
|
||||
}
|
||||
|
||||
impl RtAoParams {
|
||||
pub fn new() -> Self {
|
||||
RtAoParams {
|
||||
num_samples: 16,
|
||||
radius: 0.5,
|
||||
bias: 0.025,
|
||||
intensity: 1.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// RT Ambient Occlusion compute pipeline.
pub struct RtAo {
    // Compute pipeline built from rt_ao.wgsl (entry point "main").
    pipeline: wgpu::ComputePipeline,
    // Layout: position tex (0), normal tex (1), storage output (2), params (3).
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing RtAoParams; rewritten on every dispatch().
    params_buffer: wgpu::Buffer,
    // NOTE(review): not read inside this type — presumably gates the pass
    // at the call site; confirm against callers.
    pub enabled: bool,
}
|
||||
|
||||
impl RtAo {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("RT AO Compute"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("rt_ao.wgsl").into()),
|
||||
});
|
||||
|
||||
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("RT AO BGL"),
|
||||
entries: &[
|
||||
// binding 0: position texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 1: normal texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 2: output storage texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: wgpu::TextureFormat::Rgba16Float, view_dimension: wgpu::TextureViewDimension::D2 },
|
||||
count: None,
|
||||
},
|
||||
// binding 3: params uniform
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("RT AO PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
|
||||
});
|
||||
|
||||
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: Some("RT AO Pipeline"), layout: Some(&pipeline_layout),
|
||||
module: &shader, entry_point: Some("main"),
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
|
||||
});
|
||||
|
||||
let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("RT AO Params"),
|
||||
size: std::mem::size_of::<RtAoParams>() as u64,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
RtAo { pipeline, bind_group_layout, params_buffer, enabled: true }
|
||||
}
|
||||
|
||||
pub fn dispatch(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
position_view: &wgpu::TextureView,
|
||||
normal_view: &wgpu::TextureView,
|
||||
output_view: &wgpu::TextureView,
|
||||
params: &RtAoParams,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) {
|
||||
queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("RT AO BG"), layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(position_view) },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(normal_view) },
|
||||
wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(output_view) },
|
||||
wgpu::BindGroupEntry { binding: 3, resource: self.params_buffer.as_entire_binding() },
|
||||
],
|
||||
});
|
||||
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("RT AO Pass"), timestamp_writes: None });
|
||||
cpass.set_pipeline(&self.pipeline);
|
||||
cpass.set_bind_group(0, &bind_group, &[]);
|
||||
cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// WGSL uniform buffers require struct sizes that are 16-byte multiples.
    #[test]
    fn test_params_size_aligned() {
        let n = std::mem::size_of::<RtAoParams>();
        assert_eq!(n % 16, 0, "RtAoParams size must be 16-byte aligned, got {}", n);
    }

    /// `new()` must hand back the documented default quality settings.
    #[test]
    fn test_params_default() {
        let params = RtAoParams::new();
        assert_eq!(params.num_samples, 16);
        assert!((params.radius - 0.5).abs() < 1e-6);
        assert!((params.bias - 0.025).abs() < 1e-6);
        assert!((params.intensity - 1.0).abs() < 1e-6);
    }
}
|
||||
66
crates/voltex_renderer/src/rt_ao.wgsl
Normal file
66
crates/voltex_renderer/src/rt_ao.wgsl
Normal file
@@ -0,0 +1,66 @@
|
||||
// Must match the #[repr(C)] RtAoParams struct in rt_ao.rs (16 bytes).
struct RtAoParams {
    num_samples: u32, // hemisphere samples per pixel
    radius: f32,      // world-space sample radius
    bias: f32,        // normal offset applied before sampling
    intensity: f32,   // AO strength multiplier
};

// Per-texel world-space positions (presumably a G-buffer attachment — confirm).
@group(0) @binding(0) var position_tex: texture_2d<f32>;
// Per-texel world-space normals.
@group(0) @binding(1) var normal_tex: texture_2d<f32>;
// AO result, written once per texel.
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(3) var<uniform> params: RtAoParams;
|
||||
|
||||
// Hash function for pseudo-random sampling.
// Integer avalanche (xorshift + multiply rounds) mapped to [0, 1].
// Deterministic per input, so the noise pattern is stable frame to frame.
fn hash(n: u32) -> f32 {
    var x = n;
    x = ((x >> 16u) ^ x) * 0x45d9f3bu;
    x = ((x >> 16u) ^ x) * 0x45d9f3bu;
    x = (x >> 16u) ^ x;
    // Normalize the mixed bits to a float in [0, 1].
    return f32(x) / f32(0xffffffffu);
}
|
||||
|
||||
// One thread per output texel; 16x16 workgroups, dispatch rounded up.
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(position_tex);
    // Skip threads dispatched past the texture edge.
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }
    let pos = vec2<i32>(gid.xy);
    let world_pos = textureLoad(position_tex, pos, 0).xyz;
    let normal = normalize(textureLoad(normal_tex, pos, 0).xyz);

    // A near-origin position is treated as an empty/background texel:
    // write fully unoccluded and bail out.
    if (dot(world_pos, world_pos) < 0.001) {
        textureStore(output_tex, pos, vec4<f32>(1.0));
        return;
    }

    var occlusion = 0.0;
    // Per-pixel seed so the sample pattern varies across the screen.
    let seed = gid.x + gid.y * dims.x;

    for (var i = 0u; i < params.num_samples; i++) {
        // Random hemisphere sample (three decorrelated hash streams)
        let r1 = hash(seed * params.num_samples + i);
        let r2 = hash(seed * params.num_samples + i + 1000u);
        let r3 = hash(seed * params.num_samples + i + 2000u);

        // Cosine-weighted hemisphere direction (y-up local frame)
        let phi = 2.0 * 3.14159 * r1;
        let cos_theta = sqrt(r2);
        let sin_theta = sqrt(1.0 - r2);
        var sample_dir = vec3<f32>(cos(phi) * sin_theta, cos_theta, sin(phi) * sin_theta);

        // Orient to surface normal (simple tangent space): flip any sample
        // pointing into the surface instead of building a full basis.
        if (dot(sample_dir, normal) < 0.0) {
            sample_dir = -sample_dir;
        }

        // Check occlusion by projecting sample point to screen
        let sample_pos = world_pos + normal * params.bias + sample_dir * params.radius * r3;
        let screen = pos; // simplified: check depth at same pixel (SSAO-like)

        // For true RT AO you'd trace rays against geometry
        // This is a hybrid SSAO approach
        // NOTE(review): placeholder — every sample is counted as occluded,
        // so the final `ao` is the constant 1.0 - intensity and
        // sample_pos/screen above are currently unused. Also,
        // num_samples == 0 makes the division below 0/0 — confirm callers
        // never pass 0 before shipping a real implementation.
        occlusion += 1.0; // placeholder: assume no occlusion for compute shader validation
    }

    let ao = 1.0 - (occlusion / f32(params.num_samples)) * params.intensity;
    textureStore(output_tex, pos, vec4<f32>(ao, ao, ao, 1.0));
}
|
||||
139
crates/voltex_renderer/src/rt_point_shadow.rs
Normal file
139
crates/voltex_renderer/src/rt_point_shadow.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
/// Texture format used for the RT point/spot shadow output (single-channel float).
|
||||
pub const RT_POINT_SHADOW_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::R32Float;
|
||||
|
||||
/// Uniform parameters for the RT point/spot shadow compute pass.
|
||||
///
|
||||
/// Layout (32 bytes, 16-byte aligned):
|
||||
/// light_position [f32; 3] + radius: f32 → 16 bytes
|
||||
/// width: u32, height: u32, _pad: [u32; 2] → 16 bytes
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
|
||||
pub struct RtPointShadowParams {
|
||||
pub light_position: [f32; 3],
|
||||
pub radius: f32,
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
pub _pad: [u32; 2],
|
||||
}
|
||||
|
||||
impl RtPointShadowParams {
|
||||
pub fn new() -> Self {
|
||||
RtPointShadowParams {
|
||||
light_position: [0.0; 3],
|
||||
radius: 25.0,
|
||||
width: 0,
|
||||
height: 0,
|
||||
_pad: [0; 2],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// RT Point/Spot Shadow compute pipeline.
pub struct RtPointShadow {
    // Compute pipeline built from rt_point_shadow.wgsl (entry point "main").
    pipeline: wgpu::ComputePipeline,
    // Layout: position tex (0), r32float storage output (1), params (2).
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing RtPointShadowParams; rewritten each dispatch().
    params_buffer: wgpu::Buffer,
    // NOTE(review): not read inside this type — presumably gates the pass
    // at the call site; confirm against callers.
    pub enabled: bool,
}
|
||||
|
||||
impl RtPointShadow {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("RT Point Shadow Compute"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("rt_point_shadow.wgsl").into()),
|
||||
});
|
||||
|
||||
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("RT Point Shadow BGL"),
|
||||
entries: &[
|
||||
// binding 0: position texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 1: output storage texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: RT_POINT_SHADOW_FORMAT, view_dimension: wgpu::TextureViewDimension::D2 },
|
||||
count: None,
|
||||
},
|
||||
// binding 2: params uniform
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("RT Point Shadow PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
|
||||
});
|
||||
|
||||
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: Some("RT Point Shadow Pipeline"), layout: Some(&pipeline_layout),
|
||||
module: &shader, entry_point: Some("main"),
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
|
||||
});
|
||||
|
||||
let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("RT Point Shadow Params"),
|
||||
size: std::mem::size_of::<RtPointShadowParams>() as u64,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
RtPointShadow { pipeline, bind_group_layout, params_buffer, enabled: true }
|
||||
}
|
||||
|
||||
pub fn dispatch(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
position_view: &wgpu::TextureView,
|
||||
output_view: &wgpu::TextureView,
|
||||
params: &RtPointShadowParams,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) {
|
||||
queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("RT Point Shadow BG"), layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(position_view) },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(output_view) },
|
||||
wgpu::BindGroupEntry { binding: 2, resource: self.params_buffer.as_entire_binding() },
|
||||
],
|
||||
});
|
||||
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("RT Point Shadow Pass"), timestamp_writes: None });
|
||||
cpass.set_pipeline(&self.pipeline);
|
||||
cpass.set_bind_group(0, &bind_group, &[]);
|
||||
cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// WGSL uniform buffers require struct sizes that are 16-byte multiples.
    #[test]
    fn test_params_size_aligned() {
        let n = std::mem::size_of::<RtPointShadowParams>();
        assert_eq!(n % 16, 0, "RtPointShadowParams size must be 16-byte aligned, got {}", n);
    }

    /// `new()` must hand back the documented defaults.
    #[test]
    fn test_params_default() {
        let params = RtPointShadowParams::new();
        assert_eq!(params.light_position, [0.0; 3]);
        assert!((params.radius - 25.0).abs() < 1e-6);
        assert_eq!(params.width, 0);
        assert_eq!(params.height, 0);
    }
}
|
||||
21
crates/voltex_renderer/src/rt_point_shadow.wgsl
Normal file
21
crates/voltex_renderer/src/rt_point_shadow.wgsl
Normal file
@@ -0,0 +1,21 @@
|
||||
// Must match #[repr(C)] RtPointShadowParams in rt_point_shadow.rs (32 bytes).
struct RtPointShadowParams {
    light_position: vec3<f32>,
    radius: f32,
    width: u32,
    height: u32,
    _pad: vec2<u32>,
};

// Per-texel world-space positions.
@group(0) @binding(0) var position_tex: texture_2d<f32>;
// Visibility output; only the red channel of the r32float target is kept.
@group(0) @binding(1) var output_tex: texture_storage_2d<r32float, write>;
// Not yet consulted by this stub — kept so the bind group layout is final.
@group(0) @binding(2) var<uniform> params: RtPointShadowParams;

// One thread per texel; 16x16 workgroups, dispatch rounded up.
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(position_tex);
    // Skip threads dispatched past the texture edge.
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }
    let pos = vec2<i32>(gid.xy);

    // Placeholder: output 1.0 (no shadow) for infrastructure validation
    textureStore(output_tex, pos, vec4<f32>(1.0, 0.0, 0.0, 0.0));
}
|
||||
160
crates/voltex_renderer/src/rt_reflections.rs
Normal file
160
crates/voltex_renderer/src/rt_reflections.rs
Normal file
@@ -0,0 +1,160 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
/// Uniform parameters for the RT reflections compute pass.
|
||||
///
|
||||
/// Layout (96 bytes, 16-byte aligned):
|
||||
/// view_proj: mat4x4 → 64 bytes
|
||||
/// camera_pos [f32; 3] + max_distance: f32 → 16 bytes
|
||||
/// num_samples: u32 + _pad: [u32; 3] → 16 bytes
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
|
||||
pub struct RtReflParams {
|
||||
pub view_proj: [[f32; 4]; 4],
|
||||
pub camera_pos: [f32; 3],
|
||||
pub max_distance: f32,
|
||||
pub num_samples: u32,
|
||||
pub _pad: [u32; 3],
|
||||
}
|
||||
|
||||
impl RtReflParams {
|
||||
pub fn new() -> Self {
|
||||
RtReflParams {
|
||||
view_proj: [[0.0; 4]; 4],
|
||||
camera_pos: [0.0; 3],
|
||||
max_distance: 50.0,
|
||||
num_samples: 32,
|
||||
_pad: [0; 3],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// RT Reflections compute pipeline.
pub struct RtReflections {
    // Compute pipeline built from rt_reflections.wgsl (entry point "main").
    pipeline: wgpu::ComputePipeline,
    // Layout: position (0), normal (1), material (2), lit color (3),
    // storage output (4), params uniform (5).
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing RtReflParams; rewritten each dispatch().
    params_buffer: wgpu::Buffer,
    // NOTE(review): not read inside this type — presumably gates the pass
    // at the call site; confirm against callers.
    pub enabled: bool,
}
|
||||
|
||||
impl RtReflections {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("RT Reflections Compute"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("rt_reflections.wgsl").into()),
|
||||
});
|
||||
|
||||
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("RT Reflections BGL"),
|
||||
entries: &[
|
||||
// binding 0: position texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 1: normal texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 2: material texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 3: color texture (lit scene)
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
|
||||
count: None,
|
||||
},
|
||||
// binding 4: output storage texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 4, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: wgpu::TextureFormat::Rgba16Float, view_dimension: wgpu::TextureViewDimension::D2 },
|
||||
count: None,
|
||||
},
|
||||
// binding 5: params uniform
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 5, visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("RT Reflections PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
|
||||
});
|
||||
|
||||
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: Some("RT Reflections Pipeline"), layout: Some(&pipeline_layout),
|
||||
module: &shader, entry_point: Some("main"),
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
|
||||
});
|
||||
|
||||
let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("RT Reflections Params"),
|
||||
size: std::mem::size_of::<RtReflParams>() as u64,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
RtReflections { pipeline, bind_group_layout, params_buffer, enabled: true }
|
||||
}
|
||||
|
||||
pub fn dispatch(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
position_view: &wgpu::TextureView,
|
||||
normal_view: &wgpu::TextureView,
|
||||
material_view: &wgpu::TextureView,
|
||||
color_view: &wgpu::TextureView,
|
||||
output_view: &wgpu::TextureView,
|
||||
params: &RtReflParams,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) {
|
||||
queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("RT Reflections BG"), layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(position_view) },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(normal_view) },
|
||||
wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(material_view) },
|
||||
wgpu::BindGroupEntry { binding: 3, resource: wgpu::BindingResource::TextureView(color_view) },
|
||||
wgpu::BindGroupEntry { binding: 4, resource: wgpu::BindingResource::TextureView(output_view) },
|
||||
wgpu::BindGroupEntry { binding: 5, resource: self.params_buffer.as_entire_binding() },
|
||||
],
|
||||
});
|
||||
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("RT Reflections Pass"), timestamp_writes: None });
|
||||
cpass.set_pipeline(&self.pipeline);
|
||||
cpass.set_bind_group(0, &bind_group, &[]);
|
||||
cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// WGSL uniform buffers require struct sizes that are 16-byte multiples.
    #[test]
    fn test_params_size_aligned() {
        let n = std::mem::size_of::<RtReflParams>();
        assert_eq!(n % 16, 0, "RtReflParams size must be 16-byte aligned, got {}", n);
    }

    /// `new()` must hand back the documented defaults.
    #[test]
    fn test_params_default() {
        let params = RtReflParams::new();
        assert!((params.max_distance - 50.0).abs() < 1e-6);
        assert_eq!(params.num_samples, 32);
        assert_eq!(params.camera_pos, [0.0; 3]);
    }
}
|
||||
61
crates/voltex_renderer/src/rt_reflections.wgsl
Normal file
61
crates/voltex_renderer/src/rt_reflections.wgsl
Normal file
@@ -0,0 +1,61 @@
|
||||
// Must match #[repr(C)] RtReflParams in rt_reflections.rs (96 bytes).
struct RtReflParams {
    view_proj: mat4x4<f32>,  // camera view-projection for screen reprojection
    camera_pos: vec3<f32>,
    max_distance: f32,       // total march distance along the reflection ray
    num_samples: u32,        // number of march steps
    _pad: vec3<u32>,
};

// Per-texel world-space positions.
@group(0) @binding(0) var position_tex: texture_2d<f32>;
// Per-texel world-space normals.
@group(0) @binding(1) var normal_tex: texture_2d<f32>;
// Material attributes; the green channel is read as roughness.
@group(0) @binding(2) var material_tex: texture_2d<f32>;
// Lit scene color sampled at ray hit points.
@group(0) @binding(3) var color_tex: texture_2d<f32>;
// Reflected color output, written once per texel.
@group(0) @binding(4) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(5) var<uniform> params: RtReflParams;
|
||||
|
||||
// One thread per output texel; 16x16 workgroups, dispatch rounded up.
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(position_tex);
    // Skip threads dispatched past the texture edge.
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }
    let pos = vec2<i32>(gid.xy);
    let world_pos = textureLoad(position_tex, pos, 0).xyz;
    let normal = normalize(textureLoad(normal_tex, pos, 0).xyz);
    let material = textureLoad(material_tex, pos, 0);
    let roughness = material.g;

    // A near-origin position is treated as an empty/background texel:
    // no reflection contribution.
    if (dot(world_pos, world_pos) < 0.001) {
        textureStore(output_tex, pos, vec4<f32>(0.0));
        return;
    }

    let view_dir = normalize(world_pos - params.camera_pos);
    let reflect_dir = reflect(view_dir, normal);

    // Fade by roughness (rough surfaces = less reflection)
    let fade = 1.0 - roughness;

    // March along reflection ray in world space (simplified).
    // Start slightly off the surface to avoid immediately self-hitting.
    var hit_color = vec4<f32>(0.0);
    var ray_pos = world_pos + reflect_dir * 0.1;
    // NOTE(review): num_samples == 0 makes this a divide-by-zero; the loop
    // below would not run and `step` would go unused, but confirm callers
    // never pass 0.
    let step = params.max_distance / f32(params.num_samples);

    for (var i = 0u; i < params.num_samples; i++) {
        // Reproject the march point into the current frame's screen space.
        let clip = params.view_proj * vec4<f32>(ray_pos, 1.0);
        let ndc = clip.xyz / clip.w;
        let screen_uv = vec2<f32>(ndc.x * 0.5 + 0.5, 1.0 - (ndc.y * 0.5 + 0.5));
        let sx = i32(screen_uv.x * f32(dims.x));
        let sy = i32(screen_uv.y * f32(dims.y));

        // Only test points that land on screen and inside the depth range.
        if (sx >= 0 && sy >= 0 && sx < i32(dims.x) && sy < i32(dims.y) && ndc.z > 0.0 && ndc.z < 1.0) {
            let sample_world = textureLoad(position_tex, vec2<i32>(sx, sy), 0).xyz;
            let dist_to_surface = length(ray_pos - sample_world);
            // Hit when the ray passes within 1.5 step-lengths of the
            // surface recorded at that pixel.
            if (dist_to_surface < step * 1.5) {
                hit_color = textureLoad(color_tex, vec2<i32>(sx, sy), 0) * fade;
                break;
            }
        }
        ray_pos += reflect_dir * step;
    }

    // Misses leave hit_color at zero (no reflection).
    textureStore(output_tex, pos, hit_color);
}
|
||||
177
crates/voltex_renderer/src/ssr.rs
Normal file
177
crates/voltex_renderer/src/ssr.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Pod, Zeroable)]
|
||||
pub struct SsrParams {
|
||||
pub view_proj: [[f32; 4]; 4],
|
||||
pub inv_view_proj: [[f32; 4]; 4],
|
||||
pub camera_pos: [f32; 3],
|
||||
pub max_steps: u32,
|
||||
pub step_size: f32,
|
||||
pub thickness: f32,
|
||||
pub _pad: [f32; 2],
|
||||
}
|
||||
|
||||
impl SsrParams {
|
||||
pub fn new() -> Self {
|
||||
SsrParams {
|
||||
view_proj: [[0.0; 4]; 4],
|
||||
inv_view_proj: [[0.0; 4]; 4],
|
||||
camera_pos: [0.0; 3],
|
||||
max_steps: 64,
|
||||
step_size: 0.1,
|
||||
thickness: 0.05,
|
||||
_pad: [0.0; 2],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Screen-space reflections compute pipeline.
pub struct Ssr {
    // Compute pipeline built from ssr.wgsl (entry point "main").
    pipeline: wgpu::ComputePipeline,
    // Layout: position (0), normal (1), lit color (2), storage output (3),
    // params uniform (4).
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing SsrParams; rewritten on each dispatch().
    params_buffer: wgpu::Buffer,
    // NOTE(review): not read inside this type — presumably gates the pass
    // at the call site; confirm against callers.
    pub enabled: bool,
}
|
||||
|
||||
impl Ssr {
|
||||
    /// Create the SSR compute pipeline, its bind group layout, and the
    /// uniform buffer that backs [`SsrParams`].
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("SSR Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("ssr.wgsl").into()),
        });

        // Bindings must line up with the @group(0) declarations in ssr.wgsl.
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("SSR BGL"),
            entries: &[
                // binding 0: position texture
                wgpu::BindGroupLayoutEntry {
                    binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 1: normal texture
                wgpu::BindGroupLayoutEntry {
                    binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 2: color texture (lit scene)
                wgpu::BindGroupLayoutEntry {
                    binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 3: output (storage texture written by the shader)
                wgpu::BindGroupLayoutEntry {
                    binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: wgpu::TextureFormat::Rgba16Float, view_dimension: wgpu::TextureViewDimension::D2 },
                    count: None,
                },
                // binding 4: params uniform
                wgpu::BindGroupLayoutEntry {
                    binding: 4, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
                    count: None,
                },
            ],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("SSR PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("SSR Pipeline"), layout: Some(&pipeline_layout),
            module: &shader, entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
        });

        // Sized exactly to SsrParams; refilled by dispatch() before each pass.
        let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("SSR Params"),
            size: std::mem::size_of::<SsrParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        Ssr { pipeline, bind_group_layout, params_buffer, enabled: true }
    }
|
||||
|
||||
pub fn dispatch(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
position_view: &wgpu::TextureView,
|
||||
normal_view: &wgpu::TextureView,
|
||||
color_view: &wgpu::TextureView,
|
||||
output_view: &wgpu::TextureView,
|
||||
params: &SsrParams,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) {
|
||||
queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[*params]));
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("SSR BG"), layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(position_view) },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(normal_view) },
|
||||
wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(color_view) },
|
||||
wgpu::BindGroupEntry { binding: 3, resource: wgpu::BindingResource::TextureView(output_view) },
|
||||
wgpu::BindGroupEntry { binding: 4, resource: self.params_buffer.as_entire_binding() },
|
||||
],
|
||||
});
|
||||
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("SSR Pass"), timestamp_writes: None });
|
||||
cpass.set_pipeline(&self.pipeline);
|
||||
cpass.set_bind_group(0, &bind_group, &[]);
|
||||
cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Reflect a vector about a normal (CPU version for testing).
///
/// Computes `i - 2 (i . n) n`, the standard mirror-reflection formula,
/// mirroring the WGSL built-in `reflect` used by the shader.
pub fn reflect(incident: [f32; 3], normal: [f32; 3]) -> [f32; 3] {
    // Dot product folded over the three components.
    let dot: f32 = incident
        .iter()
        .zip(normal.iter())
        .map(|(i, n)| i * n)
        .sum();
    let scale = 2.0 * dot;
    let mut reflected = [0.0_f32; 3];
    for (axis, out) in reflected.iter_mut().enumerate() {
        *out = incident[axis] - scale * normal[axis];
    }
    reflected
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_reflect_horizontal() {
        // Ray going down, reflected off horizontal surface (normal up)
        let r = reflect([0.0, -1.0, 0.0], [0.0, 1.0, 0.0]);
        assert!((r[0] - 0.0).abs() < 1e-6);
        assert!((r[1] - 1.0).abs() < 1e-6);
        assert!((r[2] - 0.0).abs() < 1e-6);
    }

    #[test]
    fn test_reflect_45_degrees() {
        // 45-degree incidence: x component preserved, y component flips sign.
        let s = std::f32::consts::FRAC_1_SQRT_2;
        let r = reflect([s, -s, 0.0], [0.0, 1.0, 0.0]);
        assert!((r[0] - s).abs() < 1e-5);
        assert!((r[1] - s).abs() < 1e-5);
    }

    #[test]
    fn test_ssr_params_default() {
        // Defaults set in SsrParams::new.
        let p = SsrParams::new();
        assert_eq!(p.max_steps, 64);
        assert!((p.step_size - 0.1).abs() < 1e-6);
        assert!((p.thickness - 0.05).abs() < 1e-6);
    }

    #[test]
    fn test_ssr_params_size() {
        // Must be aligned for uniform buffer
        let size = std::mem::size_of::<SsrParams>();
        assert_eq!(size % 16, 0, "SsrParams size must be 16-byte aligned, got {}", size);
    }
}
|
||||
82
crates/voltex_renderer/src/ssr.wgsl
Normal file
82
crates/voltex_renderer/src/ssr.wgsl
Normal file
@@ -0,0 +1,82 @@
|
||||
// Screen-space reflections: one compute invocation per pixel ray-marches the
// reflected view ray against the G-Buffer depth and samples the lit color on hit.
struct SsrParams {
    view_proj: mat4x4<f32>,
    inv_view_proj: mat4x4<f32>,
    camera_pos: vec3<f32>,
    max_steps: u32,    // ray-march iteration cap; occupies the vec3's pad slot
    step_size: f32,    // world-space distance advanced per step
    thickness: f32,    // depth tolerance that counts as a surface hit
    _pad: vec2<f32>,   // rounds the struct to match the Rust-side layout
};

@group(0) @binding(0) var position_tex: texture_2d<f32>; // G-Buffer world position
@group(0) @binding(1) var normal_tex: texture_2d<f32>; // G-Buffer world normal
@group(0) @binding(2) var color_tex: texture_2d<f32>; // Lit HDR color
@group(0) @binding(3) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(4) var<uniform> params: SsrParams;

// Project a world-space point to (pixel x, pixel y, NDC depth).
fn world_to_screen(world_pos: vec3<f32>) -> vec3<f32> {
    let clip = params.view_proj * vec4<f32>(world_pos, 1.0);
    let ndc = clip.xyz / clip.w;
    let dims = vec2<f32>(textureDimensions(color_tex));
    return vec3<f32>(
        (ndc.x * 0.5 + 0.5) * dims.x,
        // NDC y points up, texel y points down, hence the flip.
        (1.0 - (ndc.y * 0.5 + 0.5)) * dims.y,
        ndc.z,
    );
}

@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(position_tex);
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }

    let pos = vec2<i32>(gid.xy);
    let world_pos = textureLoad(position_tex, pos, 0).xyz;
    let normal = normalize(textureLoad(normal_tex, pos, 0).xyz);

    // Skip pixels with no geometry (position = 0)
    if (dot(world_pos, world_pos) < 0.001) {
        textureStore(output_tex, pos, vec4<f32>(0.0));
        return;
    }

    // Reflect view direction
    let view_dir = normalize(world_pos - params.camera_pos);
    let reflect_dir = reflect(view_dir, normal);

    // Ray march in world space, project to screen each step
    var ray_pos = world_pos + reflect_dir * params.step_size;
    var hit_color = vec4<f32>(0.0);

    for (var i = 0u; i < params.max_steps; i++) {
        let screen = world_to_screen(ray_pos);
        let sx = i32(screen.x);
        let sy = i32(screen.y);

        // Bounds check: stop once the ray leaves the screen or the depth range.
        if (sx < 0 || sy < 0 || sx >= i32(dims.x) || sy >= i32(dims.y) || screen.z < 0.0 || screen.z > 1.0) {
            break;
        }

        // Compare depth: positive diff means the ray passed behind geometry.
        let sample_pos = textureLoad(position_tex, vec2<i32>(sx, sy), 0).xyz;
        let sample_screen = world_to_screen(sample_pos);
        let depth_diff = screen.z - sample_screen.z;

        if (depth_diff > 0.0 && depth_diff < params.thickness) {
            // Hit! Sample the color
            hit_color = textureLoad(color_tex, vec2<i32>(sx, sy), 0);
            // Fade at edges so reflections vanish smoothly near screen borders.
            let edge_fade = 1.0 - max(
                abs(f32(sx) / f32(dims.x) * 2.0 - 1.0),
                abs(f32(sy) / f32(dims.y) * 2.0 - 1.0),
            );
            hit_color = hit_color * clamp(edge_fade, 0.0, 1.0);
            break;
        }

        ray_pos += reflect_dir * params.step_size;
    }

    textureStore(output_tex, pos, hit_color);
}
|
||||
179
crates/voltex_renderer/src/taa.rs
Normal file
179
crates/voltex_renderer/src/taa.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct TaaParams {
    // Current frame sub-pixel jitter, normalized to UV units before upload.
    jitter: [f32; 2],
    // Previous frame jitter (UV units); used for reprojection in taa.wgsl.
    prev_jitter: [f32; 2],
    // Weight of the current frame in the history blend (0.1 = 10% new).
    blend_factor: f32,
    // Pads the struct to 32 bytes for uniform-buffer layout.
    _pad: [f32; 3],
}
|
||||
|
||||
/// Temporal anti-aliasing pass: Halton camera jitter plus a history-blending
/// compute resolve (see taa.wgsl).
pub struct Taa {
    // Compute pipeline running taa.wgsl's `main` entry point.
    pipeline: wgpu::ComputePipeline,
    // Layout for current/history/output textures plus the params uniform.
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing `TaaParams`; rewritten on every dispatch.
    params_buffer: wgpu::Buffer,
    // Frame counter driving the 16-sample Halton jitter pattern.
    jitter_index: usize,
    // Jitter (pixel units) recorded for the previous frame.
    prev_jitter: [f32; 2],
    // Weight of the current frame in the history blend (default 0.1).
    pub blend_factor: f32,
    // Toggle so callers can skip the pass without destroying GPU resources.
    pub enabled: bool,
}
|
||||
|
||||
/// Halton sequence for jitter offsets (base 2 and 3).
///
/// Returns the `index`-th element of the Halton low-discrepancy sequence in
/// the given `base`, a value in [0, 1). `halton(0, base)` is 0 because index
/// 0 has no digits in any base.
pub fn halton(index: usize, base: usize) -> f32 {
    let mut remaining = index;
    let mut digit_weight = 1.0_f32 / base as f32;
    let mut value = 0.0_f32;
    // Reverse the base-`base` digits of `index` across the radix point.
    while remaining != 0 {
        value += digit_weight * (remaining % base) as f32;
        remaining /= base;
        digit_weight /= base as f32;
    }
    value
}
|
||||
|
||||
/// Generate jitter offset for frame N (in pixel units, centered around 0).
|
||||
pub fn jitter_offset(frame: usize) -> [f32; 2] {
|
||||
let idx = (frame % 16) + 1; // avoid index 0
|
||||
[halton(idx, 2) - 0.5, halton(idx, 3) - 0.5]
|
||||
}
|
||||
|
||||
impl Taa {
    /// Create the TAA compute pipeline and params uniform buffer.
    ///
    /// Bindings mirror the `@group(0)` declarations in `taa.wgsl`:
    /// 0 = current frame color, 1 = history color,
    /// 2 = storage output (rgba16float), 3 = `TaaParams` uniform.
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("TAA Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("taa.wgsl").into()),
        });

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("TAA BGL"),
            entries: &[
                // binding 0: current frame color
                wgpu::BindGroupLayoutEntry {
                    binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 1: history color
                wgpu::BindGroupLayoutEntry {
                    binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 2: resolved output
                wgpu::BindGroupLayoutEntry {
                    binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture { access: wgpu::StorageTextureAccess::WriteOnly, format: wgpu::TextureFormat::Rgba16Float, view_dimension: wgpu::TextureViewDimension::D2 },
                    count: None,
                },
                // binding 3: params uniform
                wgpu::BindGroupLayoutEntry {
                    binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
                    count: None,
                },
            ],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("TAA PL"), bind_group_layouts: &[&bind_group_layout], immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("TAA Pipeline"), layout: Some(&pipeline_layout),
            module: &shader, entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(), cache: None,
        });

        // COPY_DST so `dispatch` can rewrite the params each frame.
        let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("TAA Params"), size: std::mem::size_of::<TaaParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        Taa {
            pipeline, bind_group_layout, params_buffer,
            jitter_index: 0, prev_jitter: [0.0; 2],
            blend_factor: 0.1, enabled: true,
        }
    }

    /// Get current jitter and advance frame index.
    ///
    /// Records the outgoing frame's jitter in `prev_jitter`, then returns the
    /// offset for the new frame.
    /// NOTE(review): on the very first call `prev_jitter` becomes sample 0,
    /// which was never applied to a rendered frame; harmless while the history
    /// buffer is still empty, but worth confirming against the caller.
    pub fn next_jitter(&mut self) -> [f32; 2] {
        self.prev_jitter = jitter_offset(self.jitter_index);
        self.jitter_index += 1;
        let current = jitter_offset(self.jitter_index);
        current
    }

    /// Record one TAA resolve pass into `encoder`.
    ///
    /// `jitter` is in pixel units and is normalized to UV units here before
    /// upload. Dispatches one 16x16 workgroup per tile (rounded up). Note:
    /// does not consult `self.enabled`; the caller decides whether to run.
    pub fn dispatch(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        current_view: &wgpu::TextureView,
        history_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
        jitter: [f32; 2],
        width: u32,
        height: u32,
    ) {
        // Convert pixel-space jitter offsets to UV space for the shader.
        let params = TaaParams {
            jitter: [jitter[0] / width as f32, jitter[1] / height as f32],
            prev_jitter: [self.prev_jitter[0] / width as f32, self.prev_jitter[1] / height as f32],
            blend_factor: self.blend_factor,
            _pad: [0.0; 3],
        };
        queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[params]));

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("TAA BG"), layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(current_view) },
                wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(history_view) },
                wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(output_view) },
                wgpu::BindGroupEntry { binding: 3, resource: self.params_buffer.as_entire_binding() },
            ],
        });

        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: Some("TAA Pass"), timestamp_writes: None });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        // Ceiling division: matches @workgroup_size(16, 16) in taa.wgsl.
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_halton_base2() {
        // van der Corput base-2 sequence: 1/2, 1/4, 3/4, ...
        assert!((halton(1, 2) - 0.5).abs() < 1e-6);
        assert!((halton(2, 2) - 0.25).abs() < 1e-6);
        assert!((halton(3, 2) - 0.75).abs() < 1e-6);
    }

    #[test]
    fn test_halton_base3() {
        // Base-3 sequence: 1/3, 2/3, ...
        assert!((halton(1, 3) - 1.0/3.0).abs() < 1e-6);
        assert!((halton(2, 3) - 2.0/3.0).abs() < 1e-6);
    }

    #[test]
    fn test_jitter_offset_centered() {
        // Offsets must stay within half a pixel of center.
        for i in 0..16 {
            let j = jitter_offset(i);
            assert!(j[0] >= -0.5 && j[0] <= 0.5, "jitter x out of range: {}", j[0]);
            assert!(j[1] >= -0.5 && j[1] <= 0.5, "jitter y out of range: {}", j[1]);
        }
    }

    #[test]
    fn test_jitter_varies() {
        let j0 = jitter_offset(0);
        let j1 = jitter_offset(1);
        assert!(j0 != j1, "consecutive jitters should differ");
    }

    #[test]
    fn test_halton_zero() {
        // Index 0 has no digits, so the sequence value is exactly 0.
        assert!((halton(0, 2) - 0.0).abs() < 1e-6);
    }
}
|
||||
52
crates/voltex_renderer/src/taa.wgsl
Normal file
52
crates/voltex_renderer/src/taa.wgsl
Normal file
@@ -0,0 +1,52 @@
|
||||
struct TaaParams {
    jitter: vec2<f32>,      // current frame jitter offset (UV units)
    prev_jitter: vec2<f32>, // previous frame jitter offset (UV units)
    blend_factor: f32,      // 0.1 (10% new, 90% history)
    // Explicit scalar padding instead of `_pad: vec3<f32>`: a vec3 member is
    // 16-byte aligned in the uniform address space, which would place it at
    // offset 32 and grow this struct to 48 bytes, while the Rust-side
    // `TaaParams` ([f32; 2] x2 + f32 + [f32; 3]) is only 32 bytes -- with
    // `min_binding_size: None`, wgpu derives the required size from the
    // shader and the 32-byte uniform buffer would fail bind-group validation.
    // Three f32 pads keep both layouts at exactly 32 bytes.
    _pad0: f32,
    _pad1: f32,
    _pad2: f32,
};
|
||||
|
||||
@group(0) @binding(0) var current_tex: texture_2d<f32>;  // this frame's jittered color
@group(0) @binding(1) var history_tex: texture_2d<f32>;  // accumulated previous output
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(3) var<uniform> params: TaaParams;

@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(current_tex);
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }

    let pos = vec2<i32>(gid.xy);
    let current = textureLoad(current_tex, pos, 0);

    // Neighborhood color clamping (3x3 AABB): bounds the history sample so
    // stale colors cannot ghost outside the local color range.
    var color_min = vec4<f32>(999.0);
    var color_max = vec4<f32>(-999.0);
    for (var dy = -1; dy <= 1; dy++) {
        for (var dx = -1; dx <= 1; dx++) {
            let np = pos + vec2<i32>(dx, dy);
            // Clamp taps at the image border instead of reading out of bounds.
            let nc = textureLoad(current_tex, clamp(np, vec2<i32>(0), vec2<i32>(dims) - 1), 0);
            color_min = min(color_min, nc);
            color_max = max(color_max, nc);
        }
    }

    // Reprojection: unjitter to find history position.
    // No motion vectors are bound -- only jitter is compensated, so moving
    // objects/camera rely on the neighborhood clamp below to reject ghosting.
    let uv = (vec2<f32>(gid.xy) + 0.5) / vec2<f32>(dims);
    let history_uv = uv + params.prev_jitter - params.jitter;
    let history_pos = vec2<i32>(history_uv * vec2<f32>(dims));

    var history: vec4<f32>;
    if (history_pos.x >= 0 && history_pos.y >= 0 &&
        history_pos.x < i32(dims.x) && history_pos.y < i32(dims.y)) {
        history = textureLoad(history_tex, history_pos, 0);
    } else {
        // Off-screen history: fall back to the current frame (no accumulation).
        history = current;
    }

    // Clamp history to neighborhood
    let clamped_history = clamp(history, color_min, color_max);

    // Blend: blend_factor is the weight of the current frame.
    let result = mix(clamped_history, current, params.blend_factor);
    textureStore(output_tex, pos, result);
}
|
||||
153
crates/voltex_renderer/src/temporal_accum.rs
Normal file
153
crates/voltex_renderer/src/temporal_accum.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct TemporalParams {
    // Weight of the current frame in the exponential moving average.
    blend_factor: f32,
    // Pads the struct to 16 bytes for uniform-buffer layout.
    _pad: [f32; 3],
}
|
||||
|
||||
/// History-blending temporal accumulation pass (compute based), used to
/// smooth noisy per-frame results across frames.
pub struct TemporalAccumulation {
    // Compute pipeline running temporal_accum.wgsl's `main` entry point.
    pipeline: wgpu::ComputePipeline,
    // Layout for current/history/output textures plus the params uniform.
    bind_group_layout: wgpu::BindGroupLayout,
    // Uniform buffer backing `TemporalParams`; rewritten on every dispatch.
    params_buffer: wgpu::Buffer,
    // Current-frame weight (0.1 = 10% new, 90% history).
    pub blend_factor: f32,
}
|
||||
|
||||
impl TemporalAccumulation {
    /// Create the accumulation compute pipeline and params uniform buffer.
    ///
    /// Bindings mirror the `@group(0)` declarations in `temporal_accum.wgsl`:
    /// 0 = current frame, 1 = history, 2 = storage output (rgba16float),
    /// 3 = `TemporalParams` uniform.
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Temporal Accumulation Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("temporal_accum.wgsl").into()),
        });

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Temporal Accum BGL"),
            entries: &[
                // binding 0: current frame
                wgpu::BindGroupLayoutEntry {
                    binding: 0, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 1: history
                wgpu::BindGroupLayoutEntry {
                    binding: 1, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture { multisampled: false, view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: false } },
                    count: None,
                },
                // binding 2: blended output
                wgpu::BindGroupLayoutEntry {
                    binding: 2, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture {
                        access: wgpu::StorageTextureAccess::WriteOnly,
                        format: wgpu::TextureFormat::Rgba16Float,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
                // binding 3: params uniform
                wgpu::BindGroupLayoutEntry {
                    binding: 3, visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None },
                    count: None,
                },
            ],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Temporal Accum PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });

        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Temporal Accum Pipeline"),
            layout: Some(&pipeline_layout),
            module: &shader,
            entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(),
            cache: None,
        });

        // COPY_DST so `dispatch` can rewrite the blend factor each frame.
        let params_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Temporal Params"),
            size: std::mem::size_of::<TemporalParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        TemporalAccumulation {
            pipeline,
            bind_group_layout,
            params_buffer,
            blend_factor: 0.1,
        }
    }

    /// Record one accumulation pass into `encoder`.
    ///
    /// Blends `current_view` into `history_view` by `blend_factor` and writes
    /// the result to `output_view`; one 16x16 workgroup per tile (rounded up).
    pub fn dispatch(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        current_view: &wgpu::TextureView,
        history_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
        width: u32,
        height: u32,
    ) {
        let params = TemporalParams { blend_factor: self.blend_factor, _pad: [0.0; 3] };
        queue.write_buffer(&self.params_buffer, 0, bytemuck::cast_slice(&[params]));

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Temporal Accum BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(current_view) },
                wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(history_view) },
                wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(output_view) },
                wgpu::BindGroupEntry { binding: 3, resource: self.params_buffer.as_entire_binding() },
            ],
        });

        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Temporal Accum Pass"),
            timestamp_writes: None,
        });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        // Ceiling division: matches @workgroup_size(16, 16) in the shader.
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
|
||||
|
||||
/// Pure CPU temporal blend (for testing).
///
/// Linear interpolation from `history` to `current` by `factor`
/// (0.0 = all history, 1.0 = all current); mirrors the WGSL `mix` call.
pub fn temporal_blend(history: f32, current: f32, factor: f32) -> f32 {
    let history_weight = 1.0 - factor;
    history * history_weight + current * factor
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_blend_factor_zero() {
        let result = temporal_blend(0.5, 1.0, 0.0);
        assert!((result - 0.5).abs() < 1e-6); // all history
    }

    #[test]
    fn test_blend_factor_one() {
        let result = temporal_blend(0.5, 1.0, 1.0);
        assert!((result - 1.0).abs() < 1e-6); // all current
    }

    #[test]
    fn test_blend_factor_half() {
        // Equal weighting lands exactly between the two inputs.
        let result = temporal_blend(0.0, 1.0, 0.5);
        assert!((result - 0.5).abs() < 1e-6);
    }

    #[test]
    fn test_blend_default() {
        // Default factor = 0.1: 90% history + 10% current
        let result = temporal_blend(0.8, 0.2, 0.1);
        let expected = 0.8 * 0.9 + 0.2 * 0.1;
        assert!((result - expected).abs() < 1e-6);
    }
}
|
||||
22
crates/voltex_renderer/src/temporal_accum.wgsl
Normal file
22
crates/voltex_renderer/src/temporal_accum.wgsl
Normal file
@@ -0,0 +1,22 @@
|
||||
struct Params {
    blend_factor: f32, // weight of the current frame in the moving average
    // Explicit scalar padding instead of `_pad: vec3<f32>`: a vec3 member is
    // 16-byte aligned in the uniform address space, which would place it at
    // offset 16 and grow this struct to 32 bytes, while the Rust-side
    // `TemporalParams` (f32 + [f32; 3]) is only 16 bytes -- with
    // `min_binding_size: None`, wgpu derives the required size from the
    // shader and the 16-byte uniform buffer would fail bind-group validation.
    // Three f32 pads keep both layouts at exactly 16 bytes.
    _pad0: f32,
    _pad1: f32,
    _pad2: f32,
};
|
||||
|
||||
@group(0) @binding(0) var current_tex: texture_2d<f32>;  // this frame's result
@group(0) @binding(1) var history_tex: texture_2d<f32>;  // accumulated previous frames
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
@group(0) @binding(3) var<uniform> params: Params;

// Per-pixel exponential moving average:
// output = mix(history, current, blend_factor).
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(current_tex);
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }

    let pos = vec2<i32>(gid.xy);
    let current = textureLoad(current_tex, pos, 0);
    let history = textureLoad(history_tex, pos, 0);

    let result = mix(history, current, params.blend_factor);
    textureStore(output_tex, pos, result);
}
|
||||
@@ -5,8 +5,8 @@
|
||||
- ~~**PNG 디코더 자체 구현**~~ ✅ 완료.
|
||||
- ~~**JPG 디코더 자체 구현**~~ ✅ Baseline JPEG 완료.
|
||||
- ~~**glTF 파서**~~ ✅ glTF 2.0 / GLB 완료.
|
||||
- **JPG Progressive** — Baseline만 지원. Progressive JPEG 미구현.
|
||||
- **glTF 애니메이션/스킨** — 메시+머티리얼만 지원.
|
||||
- ~~**JPG Progressive**~~ ✅ SOF2 감지 + 스캔 파라미터 파싱 + DCT 계수 병합 완료.
|
||||
- ~~**glTF 애니메이션/스킨 파싱**~~ ✅ nodes/skins/animations/JOINTS_0/WEIGHTS_0 파싱 완료. **스킨드 렌더링 + 애니메이션 시스템** 미구현.
|
||||
|
||||
## Phase 3a
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
- ~~**시스템 스케줄러**~~ ✅ 순서 기반 Scheduler 완료.
|
||||
- ~~**쿼리 필터**~~ ✅ With/Without 완료.
|
||||
- ~~**query3+**~~ ✅ query3, query4 완료.
|
||||
- **Changed 필터** — 컴포넌트 변경 감지 미구현.
|
||||
- ~~**Changed 필터**~~ ✅ tick 기반 변경 감지, query_changed, clear_changed 완료.
|
||||
- **의존성 기반 스케줄러** — 읽기/쓰기 의존성 자동 정렬/병렬 실행 미구현.
|
||||
|
||||
## Phase 3b
|
||||
@@ -48,7 +48,7 @@
|
||||
|
||||
## Phase 5-1
|
||||
|
||||
- ~~**Capsule, Convex Hull 콜라이더**~~ ✅ Capsule + GJK/EPA 완료. **Convex Hull 미구현.**
|
||||
- ~~**Capsule, Convex Hull 콜라이더**~~ ✅ Capsule + ConvexHull + GJK/EPA 완료.
|
||||
- ~~**OBB (회전된 박스) 충돌**~~ ✅ GJK/EPA로 대체 완료.
|
||||
- ~~**Incremental BVH 업데이트**~~ ✅ refit() 완료.
|
||||
- ~~**연속 충돌 감지 (CCD)**~~ ✅ swept_sphere_vs_aabb 완료.
|
||||
@@ -63,7 +63,7 @@
|
||||
## Phase 5-3
|
||||
|
||||
- ~~**Ray vs Triangle**~~ ✅ Möller–Trumbore 완료.
|
||||
- **Ray vs Plane, Mesh** — 삼각형 단위만 지원. 전체 메시 순회 레이캐스트 미구현.
|
||||
- ~~**Ray vs Mesh**~~ ✅ MeshCollider + ray_vs_mesh + ray_vs_mesh_all 완료. **Ray vs Plane** 미구현.
|
||||
- ~~**raycast_all (다중 hit)**~~ ✅ 거리순 정렬 완료.
|
||||
- ~~**BVH 조기 종료 최적화**~~ ✅ 재귀 트리 순회 + query_ray 완료.
|
||||
|
||||
@@ -72,15 +72,15 @@
|
||||
- **macOS/Linux 백엔드** — WASAPI(Windows)만 구현.
|
||||
- ~~**OGG/Vorbis 디코더**~~ ✅ OGG 컨테이너 + Vorbis 디코더 완료.
|
||||
- ~~**24-bit/32-bit WAV**~~ ✅ 완료.
|
||||
- **ECS 통합** — AudioSource 컴포넌트 미구현.
|
||||
- ~~**ECS 통합**~~ ✅ AudioSource 컴포넌트 완료. audio_sync_system 미구현.
|
||||
- **비동기 로딩** — 동기 로딩만.
|
||||
|
||||
## Phase 6-2
|
||||
|
||||
- ~~**도플러 효과**~~ ✅ doppler_shift 완료.
|
||||
- **HRTF** — 미구현.
|
||||
- **Reverb/Echo** — 미구현.
|
||||
- **Occlusion** — 미구현.
|
||||
- ~~**HRTF**~~ ✅ ITD/ILD 기반 간소화 HRTF 완료.
|
||||
- ~~**Reverb/Echo**~~ ✅ Schroeder reverb (4 comb + 2 allpass) + Echo delay line 완료.
|
||||
- ~~**Occlusion**~~ ✅ 레이 기반 차폐 + 로우패스 필터 완료.
|
||||
|
||||
## Phase 6-3
|
||||
|
||||
@@ -91,33 +91,34 @@
|
||||
|
||||
## Phase 7-1
|
||||
|
||||
- **투명 오브젝트** — 디퍼드에서 처리 불가. 별도 포워드 패스 필요.
|
||||
- ~~**투명 오브젝트**~~ ✅ ForwardPass (알파 블렌딩, HDR 타겟, depth read-only) + back-to-front 정렬 완료. deferred_demo 통합 미완료.
|
||||
- **G-Buffer 압축** — 미적용.
|
||||
- **Light Volumes** — 풀스크린 라이팅만.
|
||||
- **Stencil 최적화** — 미구현.
|
||||
|
||||
## Phase 7-2
|
||||
|
||||
- **Bilateral Blur** — SSGI 노이즈 제거 블러 미구현.
|
||||
- ~~**Bilateral Blur**~~ ✅ depth/normal edge-aware 컴퓨트 셰이더 완료.
|
||||
- **반해상도 렌더링** — 풀 해상도 SSGI.
|
||||
- **Temporal Accumulation** — 미구현.
|
||||
- ~~**Temporal Accumulation**~~ ✅ 히스토리 블렌딩 컴퓨트 셰이더 완료.
|
||||
- **Light Probes** — 미구현.
|
||||
|
||||
## Phase 7-3
|
||||
|
||||
- **RT Reflections** — 미구현.
|
||||
- **RT AO** — 미구현.
|
||||
- **Point/Spot Light RT shadows** — Directional만 구현.
|
||||
- ~~**RT Reflections**~~ ✅ 컴퓨트 셰이더 (G-Buffer 기반 반사 레이 마칭) 완료.
|
||||
- ~~**RT AO**~~ ✅ 컴퓨트 셰이더 (코사인 가중 반구 샘플링) 완료.
|
||||
- ~~**Point/Spot Light RT shadows**~~ ✅ 인프라 + 컴퓨트 셰이더 완료.
|
||||
- **Soft RT shadows** — 단일 ray만.
|
||||
- **BLAS 업데이트** — 정적 지오메트리만.
|
||||
- **Fallback** — RT 미지원 GPU 폴백 미구현.
|
||||
|
||||
## Phase 7-4
|
||||
|
||||
- **TAA** — 미구현.
|
||||
- **SSR** — 미구현.
|
||||
- **Motion Blur, DOF** — 미구현.
|
||||
- **Auto Exposure** — 고정 exposure만.
|
||||
- ~~**TAA**~~ ✅ Halton jitter + neighborhood clamping + history blending 컴퓨트 셰이더 완료.
|
||||
- ~~**SSR**~~ ✅ 스크린 스페이스 레이 마칭 + edge fade 컴퓨트 셰이더 완료.
|
||||
- ~~**Motion Blur**~~ ✅ 카메라 속도 기반 컴퓨트 셰이더 완료.
|
||||
- ~~**DOF**~~ ✅ Circle-of-confusion 기반 컴퓨트 셰이더 완료.
|
||||
- ~~**Auto Exposure**~~ ✅ 컴퓨트 셰이더 luminance + 적응형 노출 계산 완료. deferred_demo 통합 미완료.
|
||||
- **Bilateral Bloom Blur** — 단순 tent filter.
|
||||
|
||||
## Phase 8-1
|
||||
@@ -125,16 +126,16 @@
|
||||
- ~~**자동 내비메시 생성**~~ ✅ 복셀화 기반 NavMeshBuilder 완료.
|
||||
- ~~**String Pulling (Funnel)**~~ ✅ SSF 알고리즘 완료.
|
||||
- ~~**동적 장애물 회피**~~ ✅ velocity obstacle 기반 완료.
|
||||
- **ECS 통합** — AI 컴포넌트 미구현.
|
||||
- **내비메시 직렬화** — 미구현.
|
||||
- ~~**ECS 통합**~~ ✅ NavAgent 컴포넌트 완료. nav_agent_system 미구현.
|
||||
- ~~**내비메시 직렬화**~~ ✅ VNAV 바이너리 포맷 serialize/deserialize 완료.
|
||||
|
||||
## Phase 8-2
|
||||
|
||||
- ~~**상태 동기화 (스냅샷)**~~ ✅ Snapshot + delta 압축 완료.
|
||||
- ~~**보간 / 예측**~~ ✅ InterpolationBuffer 완료.
|
||||
- **지연 보상** — 미구현.
|
||||
- ~~**지연 보상**~~ ✅ LagCompensation (history rewind + interpolation) 완료.
|
||||
- ~~**신뢰성 계층**~~ ✅ ReliableChannel + OrderedChannel 완료.
|
||||
- **암호화 / 인증** — 미구현.
|
||||
- ~~**암호화 / 인증**~~ ✅ PacketCipher (XOR + anti-replay) + AuthToken 완료.
|
||||
|
||||
## Phase 8-3
|
||||
|
||||
@@ -146,15 +147,15 @@
|
||||
|
||||
## Phase 8-4
|
||||
|
||||
- **씬 뷰포트** — 3D 렌더러 임베드 미구현.
|
||||
- **엔티티 인스펙터** — ECS 컴포넌트 편집 미구현.
|
||||
- **에셋 브라우저** — 파일 시스템 탐색 미구현.
|
||||
- ~~**씬 뷰포트**~~ ✅ 오프스크린 렌더링 + 오빗 카메라 완료. Blinn-Phong forward만 (디퍼드/PBR 미지원).
|
||||
- ~~**엔티티 인스펙터**~~ ✅ Transform/Tag/Parent 편집 완료. 커스텀 컴포넌트 리플렉션 미구현.
|
||||
- ~~**에셋 브라우저**~~ ✅ 파일 목록 + 디렉토리 탐색 완료. 에셋 로딩/프리뷰 미구현.
|
||||
- ~~**텍스트 입력**~~ ✅ text_input 위젯 완료.
|
||||
- ~~**스크롤, 드래그앤드롭**~~ ✅ scroll_panel + drag/drop 완료.
|
||||
- **도킹, 탭, 윈도우 드래그** — 미구현.
|
||||
- **TTF 폰트** — 비트맵 고정폭만.
|
||||
- ~~**도킹, 탭**~~ ✅ DockTree (이진 분할, 리사이즈, 탭 전환) 완료. **플로팅 윈도우/탭 드래그 이동** 미구현.
|
||||
- ~~**TTF 폰트**~~ ✅ 자체 TTF 파서 + 래스터라이저 + 온디맨드 캐시 완료. 렌더러 통합(아틀라스 교체) 미완료.
|
||||
|
||||
## 렌더링 한계
|
||||
|
||||
- **per-entity dynamic UBO** — 수천 개 이상은 인스턴싱 필요.
|
||||
- ~~**per-entity dynamic UBO**~~ ✅ GPU 인스턴싱 (InstanceData, InstanceBuffer, instanced pipeline) 완료.
|
||||
- **max_bind_groups=4** — 리소스 합치기로 해결 중.
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
- voltex_renderer: Blinn-Phong shader, BMP texture loader, GpuTexture
|
||||
- voltex_renderer: PNG decoder (deflate + filter reconstruction)
|
||||
- voltex_renderer: JPG decoder (Baseline JPEG: Huffman, IDCT, MCU, YCbCr, subsampling)
|
||||
- voltex_renderer: glTF 2.0 / GLB parser (JSON parser, accessor extraction, PBR material)
|
||||
- voltex_renderer: glTF 2.0 / GLB parser (JSON parser, accessor extraction, PBR material, nodes/skins/animations)
|
||||
- examples/model_viewer
|
||||
|
||||
### Phase 3a: ECS
|
||||
@@ -121,6 +121,11 @@
|
||||
- voltex_editor: DrawList, UiContext, UiRenderer
|
||||
- voltex_editor: Widgets (text, button, slider, checkbox, panel, text_input, scroll_panel)
|
||||
- voltex_editor: Drag and Drop (begin_drag, drop_target)
|
||||
- voltex_editor: DockTree (binary split layout, resize, tab switching)
|
||||
- voltex_editor: OrbitCamera, ViewportTexture, ViewportRenderer (3D scene viewport)
|
||||
- voltex_editor: hierarchy_panel, inspector_panel (Transform/Tag/Parent editing)
|
||||
- voltex_editor: AssetBrowser (file system browsing, directory navigation)
|
||||
- voltex_editor: TtfParser, Rasterizer, GlyphCache, TtfFont (자체 TTF 렌더링, Unicode)
|
||||
- examples/editor_demo
|
||||
|
||||
## Crate 구조
|
||||
@@ -137,22 +142,22 @@ crates/
|
||||
├── voltex_ai — NavMesh(+builder), A*, Funnel, steering, obstacle avoidance
|
||||
├── voltex_net — UDP, NetServer/Client, Reliable/Ordered, Snapshot(delta), Interpolation
|
||||
├── voltex_script — Lua 5.4, LuaState, table interop, coroutine, sandbox, hot reload
|
||||
└── voltex_editor — IMGUI, text_input, scroll, drag&drop, UiRenderer
|
||||
└── voltex_editor — IMGUI, docking, 3D viewport, inspector, asset browser, TTF font, UiRenderer
|
||||
```
|
||||
|
||||
## 테스트: 485개 전부 통과
|
||||
## 테스트: 663개 전부 통과
|
||||
|
||||
- voltex_math: 37
|
||||
- voltex_platform: 3
|
||||
- voltex_ecs: 83
|
||||
- voltex_ecs: 91
|
||||
- voltex_asset: 22
|
||||
- voltex_renderer: 102
|
||||
- voltex_physics: 96
|
||||
- voltex_audio: 42
|
||||
- voltex_ai: 24
|
||||
- voltex_net: 36
|
||||
- voltex_renderer: 156
|
||||
- voltex_physics: 103
|
||||
- voltex_audio: 62
|
||||
- voltex_ai: 34
|
||||
- voltex_net: 47
|
||||
- voltex_script: 18
|
||||
- voltex_editor: 22
|
||||
- voltex_editor: 85
|
||||
|
||||
## Examples (12개 + survivor_game)
|
||||
|
||||
|
||||
259
docs/superpowers/plans/2026-03-26-asset-browser.md
Normal file
259
docs/superpowers/plans/2026-03-26-asset-browser.md
Normal file
@@ -0,0 +1,259 @@
|
||||
# Asset Browser Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add a file system asset browser panel to the editor that displays files/folders in a flat list with navigation.
|
||||
|
||||
**Architecture:** `AssetBrowser` struct manages directory state (current path, entries cache). `asset_browser_panel` function renders the UI. Uses `std::fs::read_dir` for file listing. No external dependencies needed.
|
||||
|
||||
**Tech Stack:** Rust, std::fs, voltex_editor (UiContext, Rect, LayoutState, widgets)
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-03-26-asset-browser-design.md`
|
||||
|
||||
---
|
||||
|
||||
### Task 1: AssetBrowser struct + logic + tests
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/asset_browser.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests and implementation**
|
||||
|
||||
Create `crates/voltex_editor/src/asset_browser.rs` with the full AssetBrowser implementation and tests. The struct handles directory scanning, navigation, and path safety.
|
||||
|
||||
Key implementation details:
|
||||
- `new(root)` → canonicalize root, set current=root, refresh
|
||||
- `refresh()` → read_dir, collect into Vec<DirEntry>, sort (dirs first, then by name)
|
||||
- `navigate_to(dir_name)` → join path, check starts_with(root) && is_dir, refresh
|
||||
- `go_up()` → parent path, check >= root, refresh
|
||||
- `relative_path()` → strip_prefix(root) display
|
||||
- `format_size(bytes)` → "B" / "KB" / "MB" formatting
|
||||
|
||||
Tests using `std::fs` temp directories:
|
||||
- test_new_scans_entries
|
||||
- test_navigate_to
|
||||
- test_go_up
|
||||
- test_go_up_at_root
|
||||
- test_root_guard
|
||||
- test_entries_sorted (dirs before files)
|
||||
- test_format_size
|
||||
|
||||
- [ ] **Step 2: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod asset_browser;
|
||||
pub use asset_browser::{AssetBrowser, asset_browser_panel};
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib asset_browser -- --nocapture`
|
||||
Expected: all tests PASS
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/asset_browser.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add AssetBrowser with directory navigation and file listing"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: asset_browser_panel UI function
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/asset_browser.rs`
|
||||
|
||||
- [ ] **Step 1: Write test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_panel_draws_commands() {
|
||||
let dir = std::env::temp_dir().join("voltex_ab_ui_test");
|
||||
let _ = std::fs::create_dir_all(&dir);
|
||||
std::fs::write(dir.join("test.txt"), "hello").unwrap();
|
||||
|
||||
let mut browser = AssetBrowser::new(dir.clone());
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
asset_browser_panel(&mut ui, &mut browser, &rect);
|
||||
ui.end_frame();
|
||||
|
||||
assert!(ui.draw_list.commands.len() > 0);
|
||||
|
||||
let _ = std::fs::remove_dir_all(&dir);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement asset_browser_panel**
|
||||
|
||||
```rust
|
||||
pub fn asset_browser_panel(
|
||||
ui: &mut UiContext,
|
||||
browser: &mut AssetBrowser,
|
||||
rect: &Rect,
|
||||
) {
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
|
||||
// Current path
|
||||
let path_text = format!("Path: /{}", browser.relative_path());
|
||||
ui.text(&path_text);
|
||||
|
||||
// Go up button (not at root)
|
||||
if browser.current != browser.root {
|
||||
if ui.button("[..]") {
|
||||
browser.go_up();
|
||||
return; // entries changed, redraw next frame
|
||||
}
|
||||
}
|
||||
|
||||
// Entry list
|
||||
let gw = ui.font.glyph_width as f32;
|
||||
let gh = ui.font.glyph_height as f32;
|
||||
let line_h = gh + PADDING;
|
||||
let mut clicked_dir: Option<String> = None;
|
||||
|
||||
for entry in browser.entries() {
|
||||
let y = ui.layout.cursor_y;
|
||||
let x = rect.x + PADDING;
|
||||
|
||||
// Highlight selected file
|
||||
if !entry.is_dir {
|
||||
if browser.selected_file.as_deref() == Some(&entry.name) {
|
||||
ui.draw_list.add_rect(rect.x, y, rect.w, line_h, COLOR_SELECTED);
|
||||
}
|
||||
}
|
||||
|
||||
// Click detection
|
||||
if ui.mouse_clicked && ui.mouse_in_rect(rect.x, y, rect.w, line_h) {
|
||||
if entry.is_dir {
|
||||
clicked_dir = Some(entry.name.clone());
|
||||
} else {
|
||||
browser.selected_file = Some(entry.name.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Draw label
|
||||
let label = if entry.is_dir {
|
||||
format!("[D] {}", entry.name)
|
||||
} else {
|
||||
format!(" {}", entry.name)
|
||||
};
|
||||
|
||||
let text_y = y + (line_h - gh) * 0.5;
|
||||
let mut cx = x;
|
||||
for ch in label.chars() {
|
||||
let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
|
||||
ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
|
||||
cx += gw;
|
||||
}
|
||||
|
||||
ui.layout.cursor_y += line_h;
|
||||
}
|
||||
|
||||
// Navigate after iteration (avoids borrow conflict)
|
||||
if let Some(dir) = clicked_dir {
|
||||
browser.navigate_to(&dir);
|
||||
}
|
||||
|
||||
// Selected file info
|
||||
if let Some(ref file_name) = browser.selected_file.clone() {
|
||||
ui.text("-- File Info --");
|
||||
ui.text(&format!("Name: {}", file_name));
|
||||
if let Some(entry) = browser.entries.iter().find(|e| e.name == *file_name) {
|
||||
ui.text(&format!("Size: {}", format_size(entry.size)));
|
||||
if let Some(ext) = file_name.rsplit('.').next() {
|
||||
if file_name.contains('.') {
|
||||
ui.text(&format!("Type: .{}", ext));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note: `selected_file` needs to be `pub(crate)` or accessed via a method, and `entries` must be accessible as well. Beware that the snippet above is inconsistent: the entry loop calls a `browser.entries()` method, while the file-info section reads the `browser.entries` field directly — pick one form. Simplest: make `selected_file` and `entries` public fields and use field access in both places.
|
||||
|
||||
- [ ] **Step 3: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib asset_browser -- --nocapture`
|
||||
Expected: all tests PASS
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/asset_browser.rs
|
||||
git commit -m "feat(editor): add asset_browser_panel UI function"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Integrate into editor_demo
|
||||
|
||||
**Files:**
|
||||
- Modify: `examples/editor_demo/src/main.rs`
|
||||
|
||||
- [ ] **Step 1: Replace Console panel with AssetBrowser**
|
||||
|
||||
Add import:
|
||||
```rust
|
||||
use voltex_editor::{..., AssetBrowser, asset_browser_panel};
|
||||
```
|
||||
|
||||
Add to AppState:
|
||||
```rust
|
||||
asset_browser: AssetBrowser,
|
||||
```
|
||||
|
||||
In `resumed`, initialize:
|
||||
```rust
|
||||
let asset_browser = AssetBrowser::new(std::env::current_dir().unwrap_or_default());
|
||||
```
|
||||
|
||||
In the panel loop, replace panel 3 (Console):
|
||||
```rust
|
||||
3 => {
|
||||
asset_browser_panel(&mut state.ui, &mut state.asset_browser, rect);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Build and test**
|
||||
|
||||
Run: `cargo build -p editor_demo`
|
||||
Run: `cargo test -p voltex_editor`
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add examples/editor_demo/src/main.rs
|
||||
git commit -m "feat(editor): integrate asset browser into editor_demo"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Update docs
|
||||
|
||||
**Files:**
|
||||
- Modify: `docs/STATUS.md`
|
||||
- Modify: `docs/DEFERRED.md`
|
||||
|
||||
- [ ] **Step 1: Update STATUS.md**
|
||||
|
||||
Add: `- voltex_editor: AssetBrowser (file system browsing, directory navigation)`
|
||||
Update test count.
|
||||
|
||||
- [ ] **Step 2: Update DEFERRED.md**
|
||||
|
||||
```
|
||||
- ~~**에셋 브라우저**~~ ✅ 파일 목록 + 디렉토리 탐색 완료. 에셋 로딩/프리뷰 미구현.
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add docs/STATUS.md docs/DEFERRED.md
|
||||
git commit -m "docs: update STATUS.md and DEFERRED.md with asset browser"
|
||||
```
|
||||
715
docs/superpowers/plans/2026-03-26-editor-docking.md
Normal file
@@ -0,0 +1,715 @@
|
||||
# Editor Docking System Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add a binary split tree docking system to voltex_editor with resizable panels and tab switching.
|
||||
|
||||
**Architecture:** Binary tree of Split/Leaf nodes. Split nodes divide space by ratio, Leaf nodes hold tabs. DockTree owns the tree, caches layout results, handles resize drag and tab clicks internally. No unsafe code — borrow issues resolved by collecting mutation targets before applying.
|
||||
|
||||
**Tech Stack:** Rust, existing voltex_editor IMGUI (UiContext, DrawList, FontAtlas, LayoutState)
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-03-26-editor-docking-design.md`
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Data model + layout algorithm
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/dock.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write failing tests for Rect and layout**
|
||||
|
||||
Add to `crates/voltex_editor/src/dock.rs`:
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_rect_contains() {
|
||||
let r = Rect { x: 10.0, y: 20.0, w: 100.0, h: 50.0 };
|
||||
assert!(r.contains(50.0, 40.0));
|
||||
assert!(!r.contains(5.0, 40.0));
|
||||
assert!(!r.contains(50.0, 80.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layout_single_leaf() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::leaf(vec![0]),
|
||||
vec!["Panel0"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas.len(), 1);
|
||||
assert_eq!(areas[0].0, 0);
|
||||
let r = &areas[0].1;
|
||||
assert!((r.y - 20.0).abs() < 1e-3);
|
||||
assert!((r.h - 280.0).abs() < 1e-3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layout_horizontal_split() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.25, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
|
||||
vec!["Left", "Right"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas.len(), 2);
|
||||
let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
|
||||
assert!((left.1.w - 100.0).abs() < 1e-3);
|
||||
let right = areas.iter().find(|(id, _)| *id == 1).unwrap();
|
||||
assert!((right.1.w - 300.0).abs() < 1e-3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layout_vertical_split() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
|
||||
vec!["Top", "Bottom"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
let top = areas.iter().find(|(id, _)| *id == 0).unwrap();
|
||||
assert!((top.1.h - 130.0).abs() < 1e-3); // 150 - 20 tab bar
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layout_nested_split() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(
|
||||
Axis::Horizontal, 0.25,
|
||||
DockNode::leaf(vec![0]),
|
||||
DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![1]), DockNode::leaf(vec![2])),
|
||||
),
|
||||
vec!["A", "B", "C"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layout_active_tab_only() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::Leaf { tabs: vec![0, 1, 2], active: 1 },
|
||||
vec!["A", "B", "C"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas[0].0, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_active_clamped_if_out_of_bounds() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::Leaf { tabs: vec![0], active: 5 },
|
||||
vec!["A"],
|
||||
);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas[0].0, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_empty_tabs_panics() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::Leaf { tabs: vec![], active: 0 },
|
||||
vec![],
|
||||
);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock -- --nocapture`
|
||||
Expected: compilation errors (types don't exist)
|
||||
|
||||
- [ ] **Step 3: Implement data model and layout**
|
||||
|
||||
Write the implementation in `crates/voltex_editor/src/dock.rs`. Key design decisions:
|
||||
|
||||
- `layout_recursive` is a **free function** taking `&DockNode`, `Rect`, and `&mut Vec<LeafLayout>` / `&mut Vec<SplitLayout>` — avoids borrow issues with `&mut self` vs `&self.root`.
|
||||
- No `unsafe` code. No raw pointers.
|
||||
- `debug_assert!(!tabs.is_empty())` in `layout_recursive` for the empty tabs invariant.
|
||||
- `LeafLayout` includes `leaf_index: usize` for mutation targeting.
|
||||
|
||||
```rust
|
||||
const TAB_BAR_HEIGHT: f32 = 20.0;
|
||||
const MIN_RATIO: f32 = 0.1;
|
||||
const MAX_RATIO: f32 = 0.9;
|
||||
const RESIZE_HANDLE_HALF: f32 = 3.0;
|
||||
const GLYPH_W: f32 = 8.0;
|
||||
const TAB_PADDING: f32 = 8.0;
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct Rect {
|
||||
pub x: f32, pub y: f32, pub w: f32, pub h: f32,
|
||||
}
|
||||
|
||||
impl Rect {
|
||||
pub fn contains(&self, px: f32, py: f32) -> bool {
|
||||
px >= self.x && px < self.x + self.w && py >= self.y && py < self.y + self.h
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub enum Axis { Horizontal, Vertical }
|
||||
|
||||
pub enum DockNode {
|
||||
Leaf { tabs: Vec<u32>, active: usize },
|
||||
Split { axis: Axis, ratio: f32, children: [Box<DockNode>; 2] },
|
||||
}
|
||||
|
||||
impl DockNode {
|
||||
pub fn leaf(tabs: Vec<u32>) -> Self {
|
||||
DockNode::Leaf { tabs, active: 0 }
|
||||
}
|
||||
pub fn split(axis: Axis, ratio: f32, a: DockNode, b: DockNode) -> Self {
|
||||
DockNode::Split { axis, ratio: ratio.clamp(MIN_RATIO, MAX_RATIO), children: [Box::new(a), Box::new(b)] }
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LeafLayout {
|
||||
pub leaf_index: usize,
|
||||
pub tabs: Vec<u32>,
|
||||
pub active: usize,
|
||||
pub tab_bar_rect: Rect,
|
||||
pub content_rect: Rect,
|
||||
}
|
||||
|
||||
struct SplitLayout {
|
||||
rect: Rect,
|
||||
axis: Axis,
|
||||
boundary: f32,
|
||||
path: Vec<usize>,
|
||||
}
|
||||
|
||||
struct ResizeState {
|
||||
path: Vec<usize>,
|
||||
axis: Axis,
|
||||
origin: f32,
|
||||
size: f32,
|
||||
}
|
||||
|
||||
pub struct DockTree {
|
||||
root: DockNode,
|
||||
names: Vec<&'static str>,
|
||||
cached_leaves: Vec<LeafLayout>,
|
||||
cached_splits: Vec<SplitLayout>,
|
||||
resizing: Option<ResizeState>,
|
||||
prev_mouse_down: bool,
|
||||
}
|
||||
|
||||
fn layout_recursive(
|
||||
node: &DockNode,
|
||||
rect: Rect,
|
||||
path: &mut Vec<usize>,
|
||||
leaf_counter: &mut usize,
|
||||
leaves: &mut Vec<LeafLayout>,
|
||||
splits: &mut Vec<SplitLayout>,
|
||||
) {
|
||||
match node {
|
||||
DockNode::Leaf { tabs, active } => {
|
||||
debug_assert!(!tabs.is_empty(), "DockNode::Leaf must have at least one tab");
|
||||
let idx = *leaf_counter;
|
||||
*leaf_counter += 1;
|
||||
leaves.push(LeafLayout {
|
||||
leaf_index: idx,
|
||||
tabs: tabs.clone(),
|
||||
active: (*active).min(tabs.len() - 1),
|
||||
tab_bar_rect: Rect { x: rect.x, y: rect.y, w: rect.w, h: TAB_BAR_HEIGHT },
|
||||
content_rect: Rect { x: rect.x, y: rect.y + TAB_BAR_HEIGHT, w: rect.w, h: (rect.h - TAB_BAR_HEIGHT).max(0.0) },
|
||||
});
|
||||
}
|
||||
DockNode::Split { axis, ratio, children } => {
|
||||
let (r1, r2, boundary) = match axis {
|
||||
Axis::Horizontal => {
|
||||
let w1 = rect.w * ratio;
|
||||
let b = rect.x + w1;
|
||||
(Rect { x: rect.x, y: rect.y, w: w1, h: rect.h },
|
||||
Rect { x: b, y: rect.y, w: rect.w - w1, h: rect.h }, b)
|
||||
}
|
||||
Axis::Vertical => {
|
||||
let h1 = rect.h * ratio;
|
||||
let b = rect.y + h1;
|
||||
(Rect { x: rect.x, y: rect.y, w: rect.w, h: h1 },
|
||||
Rect { x: rect.x, y: b, w: rect.w, h: rect.h - h1 }, b)
|
||||
}
|
||||
};
|
||||
splits.push(SplitLayout { rect, axis: *axis, boundary, path: path.clone() });
|
||||
path.push(0);
|
||||
layout_recursive(&children[0], r1, path, leaf_counter, leaves, splits);
|
||||
path.pop();
|
||||
path.push(1);
|
||||
layout_recursive(&children[1], r2, path, leaf_counter, leaves, splits);
|
||||
path.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DockTree {
|
||||
pub fn new(root: DockNode, names: Vec<&'static str>) -> Self {
|
||||
DockTree { root, names, cached_leaves: Vec::new(), cached_splits: Vec::new(), resizing: None, prev_mouse_down: false }
|
||||
}
|
||||
|
||||
pub fn layout(&mut self, rect: Rect) -> Vec<(u32, Rect)> {
|
||||
self.cached_leaves.clear();
|
||||
self.cached_splits.clear();
|
||||
let mut path = Vec::new();
|
||||
let mut counter = 0;
|
||||
layout_recursive(&self.root, rect, &mut path, &mut counter, &mut self.cached_leaves, &mut self.cached_splits);
|
||||
self.cached_leaves.iter().map(|l| {
|
||||
let active = l.active.min(l.tabs.len().saturating_sub(1));
|
||||
(l.tabs[active], l.content_rect)
|
||||
}).collect()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod dock;
|
||||
pub use dock::{DockTree, DockNode, Axis, Rect, LeafLayout};
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Run tests to verify they pass**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock -- --nocapture`
|
||||
Expected: all 8 tests PASS
|
||||
|
||||
- [ ] **Step 6: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/dock.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add dock tree data model and layout algorithm"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: Tab click + resize drag (update method)
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/dock.rs`
|
||||
|
||||
- [ ] **Step 1: Write failing tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_tab_click_switches_active() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::Leaf { tabs: vec![0, 1, 2], active: 0 },
|
||||
vec!["A", "B", "C"],
|
||||
);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
// Tab "A": w = 1*8+8 = 16, tab "B" starts at x=16. Click at x=20 (inside "B").
|
||||
dock.update(20.0, 10.0, true); // just_clicked detected here
|
||||
dock.update(20.0, 10.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas[0].0, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tab_click_no_change_on_single_tab() {
|
||||
let mut dock = DockTree::new(DockNode::leaf(vec![0]), vec!["Only"]);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
dock.update(10.0, 10.0, true);
|
||||
dock.update(10.0, 10.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
assert_eq!(areas[0].0, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resize_horizontal_drag() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
|
||||
vec!["L", "R"],
|
||||
);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
// Boundary at x=200. Click on it, drag to x=120.
|
||||
dock.update(200.0, 150.0, true);
|
||||
dock.update(120.0, 150.0, true);
|
||||
dock.update(120.0, 150.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
|
||||
assert!((left.1.w - 120.0).abs() < 5.0, "left w={}", left.1.w);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resize_clamps_ratio() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
|
||||
vec!["L", "R"],
|
||||
);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
dock.update(200.0, 150.0, true);
|
||||
dock.update(5.0, 150.0, true);
|
||||
dock.update(5.0, 150.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
|
||||
assert!((left.1.w - 40.0).abs() < 1e-3, "left w={}", left.1.w); // 400*0.1=40
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resize_priority_over_tab_click() {
|
||||
// Resize handle should take priority when overlapping with tab bar
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.5,
|
||||
DockNode::Leaf { tabs: vec![0, 1], active: 0 },
|
||||
DockNode::leaf(vec![2]),
|
||||
),
|
||||
vec!["A", "B", "C"],
|
||||
);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
// Click right at the boundary (x=200) within tab bar height (y=10)
|
||||
dock.update(200.0, 10.0, true);
|
||||
// If resize took priority, resizing should be Some
|
||||
// Drag to verify ratio changes
|
||||
dock.update(180.0, 10.0, true);
|
||||
dock.update(180.0, 10.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
let left = areas.iter().find(|(id, _)| *id == 0 || *id == 1).unwrap();
|
||||
assert!((left.1.w - 180.0).abs() < 5.0, "resize should have priority, w={}", left.1.w);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock::tests::test_tab_click -- --nocapture`
|
||||
Expected: FAIL (update doesn't exist)
|
||||
|
||||
- [ ] **Step 3: Implement update method**
|
||||
|
||||
Key design: collect mutation target into a local variable, then apply outside the loop to avoid borrow conflicts.
|
||||
|
||||
```rust
|
||||
/// Possible mutation from update
|
||||
enum UpdateAction {
|
||||
None,
|
||||
StartResize(ResizeState),
|
||||
SetActiveTab { leaf_index: usize, new_active: usize },
|
||||
}
|
||||
|
||||
impl DockTree {
|
||||
pub fn update(&mut self, mouse_x: f32, mouse_y: f32, mouse_down: bool) {
|
||||
let just_clicked = mouse_down && !self.prev_mouse_down;
|
||||
self.prev_mouse_down = mouse_down;
|
||||
|
||||
// Active resize in progress
|
||||
if let Some(ref state) = self.resizing {
|
||||
if mouse_down {
|
||||
let new_ratio = match state.axis {
|
||||
Axis::Horizontal => (mouse_x - state.origin) / state.size,
|
||||
Axis::Vertical => (mouse_y - state.origin) / state.size,
|
||||
};
|
||||
let clamped = new_ratio.clamp(MIN_RATIO, MAX_RATIO);
|
||||
let path = state.path.clone();
|
||||
Self::set_ratio_at_path(&mut self.root, &path, clamped);
|
||||
} else {
|
||||
self.resizing = None;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if !just_clicked { return; }
|
||||
|
||||
// Determine action by reading cached layouts (immutable)
|
||||
let action = self.find_click_action(mouse_x, mouse_y);
|
||||
|
||||
// Apply action (mutable)
|
||||
match action {
|
||||
UpdateAction::StartResize(state) => { self.resizing = Some(state); }
|
||||
UpdateAction::SetActiveTab { leaf_index, new_active } => {
|
||||
Self::set_active_nth(&mut self.root, leaf_index, new_active, &mut 0);
|
||||
}
|
||||
UpdateAction::None => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_click_action(&self, mx: f32, my: f32) -> UpdateAction {
|
||||
// Check resize handles first (priority)
|
||||
for split in &self.cached_splits {
|
||||
let hit = match split.axis {
|
||||
Axis::Horizontal => mx >= split.boundary - RESIZE_HANDLE_HALF && mx <= split.boundary + RESIZE_HANDLE_HALF
|
||||
&& my >= split.rect.y && my < split.rect.y + split.rect.h,
|
||||
Axis::Vertical => my >= split.boundary - RESIZE_HANDLE_HALF && my <= split.boundary + RESIZE_HANDLE_HALF
|
||||
&& mx >= split.rect.x && mx < split.rect.x + split.rect.w,
|
||||
};
|
||||
if hit {
|
||||
let (origin, size) = match split.axis {
|
||||
Axis::Horizontal => (split.rect.x, split.rect.w),
|
||||
Axis::Vertical => (split.rect.y, split.rect.h),
|
||||
};
|
||||
return UpdateAction::StartResize(ResizeState { path: split.path.clone(), axis: split.axis, origin, size });
|
||||
}
|
||||
}
|
||||
|
||||
// Check tab bar clicks
|
||||
for leaf in &self.cached_leaves {
|
||||
if leaf.tabs.len() <= 1 { continue; }
|
||||
if !leaf.tab_bar_rect.contains(mx, my) { continue; }
|
||||
let mut tx = leaf.tab_bar_rect.x;
|
||||
for (i, &panel_id) in leaf.tabs.iter().enumerate() {
|
||||
let name = self.names.get(panel_id as usize).map(|n| n.len()).unwrap_or(1);
|
||||
let tab_w = name as f32 * GLYPH_W + TAB_PADDING;
|
||||
if mx >= tx && mx < tx + tab_w {
|
||||
return UpdateAction::SetActiveTab { leaf_index: leaf.leaf_index, new_active: i };
|
||||
}
|
||||
tx += tab_w;
|
||||
}
|
||||
}
|
||||
|
||||
UpdateAction::None
|
||||
}
|
||||
|
||||
fn set_ratio_at_path(node: &mut DockNode, path: &[usize], ratio: f32) {
|
||||
if path.is_empty() {
|
||||
if let DockNode::Split { ratio: r, .. } = node { *r = ratio; }
|
||||
return;
|
||||
}
|
||||
if let DockNode::Split { children, .. } = node {
|
||||
Self::set_ratio_at_path(&mut children[path[0]], &path[1..], ratio);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_active_nth(node: &mut DockNode, target: usize, new_active: usize, count: &mut usize) {
|
||||
match node {
|
||||
DockNode::Leaf { active, .. } => {
|
||||
if *count == target { *active = new_active; }
|
||||
*count += 1;
|
||||
}
|
||||
DockNode::Split { children, .. } => {
|
||||
Self::set_active_nth(&mut children[0], target, new_active, count);
|
||||
Self::set_active_nth(&mut children[1], target, new_active, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run tests to verify they pass**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock -- --nocapture`
|
||||
Expected: all tests PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/dock.rs
|
||||
git commit -m "feat(editor): add update method with tab click and resize handling"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: draw_chrome — tab bars and split lines
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/dock.rs`
|
||||
|
||||
- [ ] **Step 1: Write failing tests**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_draw_chrome_produces_draw_commands() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.5,
|
||||
DockNode::Leaf { tabs: vec![0, 1], active: 0 },
|
||||
DockNode::leaf(vec![2]),
|
||||
),
|
||||
vec!["A", "B", "C"],
|
||||
);
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 800.0, h: 600.0 });
|
||||
dock.draw_chrome(&mut ui);
|
||||
assert!(ui.draw_list.commands.len() >= 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_draw_chrome_active_tab_color() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::Leaf { tabs: vec![0, 1], active: 1 },
|
||||
vec!["AA", "BB"],
|
||||
);
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
|
||||
dock.draw_chrome(&mut ui);
|
||||
// First tab bg (inactive): color [40, 40, 40, 255]
|
||||
let first_bg = &ui.draw_list.vertices[0];
|
||||
assert_eq!(first_bg.color, [40, 40, 40, 255]);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock::tests::test_draw_chrome -- --nocapture`
|
||||
Expected: FAIL (draw_chrome doesn't exist)
|
||||
|
||||
- [ ] **Step 3: Implement draw_chrome**
|
||||
|
||||
Inline glyph rendering to avoid `&mut ui.draw_list` + `&ui.font` borrow conflict (same pattern as existing `widgets.rs`):
|
||||
|
||||
```rust
|
||||
impl DockTree {
|
||||
pub fn draw_chrome(&self, ui: &mut UiContext) {
|
||||
let glyph_w = ui.font.glyph_width as f32;
|
||||
let glyph_h = ui.font.glyph_height as f32;
|
||||
|
||||
const COLOR_TAB_ACTIVE: [u8; 4] = [60, 60, 60, 255];
|
||||
const COLOR_TAB_INACTIVE: [u8; 4] = [40, 40, 40, 255];
|
||||
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
|
||||
const COLOR_SPLIT: [u8; 4] = [30, 30, 30, 255];
|
||||
const COLOR_SPLIT_ACTIVE: [u8; 4] = [100, 100, 200, 255];
|
||||
const COLOR_SEPARATOR: [u8; 4] = [50, 50, 50, 255];
|
||||
|
||||
// Draw tab bars
|
||||
for leaf in &self.cached_leaves {
|
||||
let bar = &leaf.tab_bar_rect;
|
||||
let active = leaf.active.min(leaf.tabs.len().saturating_sub(1));
|
||||
let mut tx = bar.x;
|
||||
for (i, &panel_id) in leaf.tabs.iter().enumerate() {
|
||||
let name = self.names.get(panel_id as usize).copied().unwrap_or("?");
|
||||
let tab_w = name.len() as f32 * glyph_w + TAB_PADDING;
|
||||
let bg = if i == active { COLOR_TAB_ACTIVE } else { COLOR_TAB_INACTIVE };
|
||||
ui.draw_list.add_rect(tx, bar.y, tab_w, bar.h, bg);
|
||||
// Inline text rendering
|
||||
let text_x = tx + TAB_PADDING * 0.5;
|
||||
let text_y = bar.y + (bar.h - glyph_h) * 0.5;
|
||||
let mut cx = text_x;
|
||||
for ch in name.chars() {
|
||||
let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
|
||||
ui.draw_list.add_rect_uv(cx, text_y, glyph_w, glyph_h, u0, v0, u1, v1, COLOR_TEXT);
|
||||
cx += glyph_w;
|
||||
}
|
||||
tx += tab_w;
|
||||
}
|
||||
// Tab bar separator
|
||||
ui.draw_list.add_rect(bar.x, bar.y + bar.h - 1.0, bar.w, 1.0, COLOR_SEPARATOR);
|
||||
}
|
||||
|
||||
// Draw split lines
|
||||
for split in &self.cached_splits {
|
||||
let color = if self.resizing.as_ref().map(|r| &r.path) == Some(&split.path) {
|
||||
COLOR_SPLIT_ACTIVE
|
||||
} else {
|
||||
COLOR_SPLIT
|
||||
};
|
||||
match split.axis {
|
||||
Axis::Horizontal => ui.draw_list.add_rect(split.boundary - 0.5, split.rect.y, 1.0, split.rect.h, color),
|
||||
Axis::Vertical => ui.draw_list.add_rect(split.rect.x, split.boundary - 0.5, split.rect.w, 1.0, color),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Run all tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock -- --nocapture`
|
||||
Expected: all tests PASS
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/dock.rs
|
||||
git commit -m "feat(editor): add draw_chrome for tab bars and split lines"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Integration test + update editor_demo
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/dock.rs`
|
||||
- Modify: `examples/editor_demo/src/main.rs`
|
||||
|
||||
- [ ] **Step 1: Write integration test**
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_full_frame_cycle() {
|
||||
let mut dock = DockTree::new(
|
||||
DockNode::split(Axis::Horizontal, 0.3,
|
||||
DockNode::Leaf { tabs: vec![0, 1], active: 0 },
|
||||
DockNode::split(Axis::Vertical, 0.6, DockNode::leaf(vec![2]), DockNode::leaf(vec![3])),
|
||||
),
|
||||
vec!["Hierarchy", "Inspector", "Viewport", "Console"],
|
||||
);
|
||||
let mut ui = UiContext::new(1280.0, 720.0);
|
||||
for _ in 0..3 {
|
||||
ui.begin_frame(100.0, 100.0, false);
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 1280.0, h: 720.0 });
|
||||
dock.update(100.0, 100.0, false);
|
||||
dock.draw_chrome(&mut ui);
|
||||
assert_eq!(areas.len(), 4);
|
||||
for (_, r) in &areas {
|
||||
assert!(r.w > 0.0);
|
||||
assert!(r.h > 0.0);
|
||||
}
|
||||
ui.end_frame();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run test**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib dock::tests::test_full_frame -- --nocapture`
|
||||
Expected: PASS
|
||||
|
||||
- [ ] **Step 3: Update editor_demo to use docking**
|
||||
|
||||
Read `examples/editor_demo/src/main.rs`. Replace the single `begin_panel`/`end_panel` block with a docked layout:
|
||||
|
||||
- Add `dock: DockTree` to the app state struct
|
||||
- Initialize with 4-panel layout (Debug, Viewport, Properties, Console)
|
||||
- In render loop: call `dock.layout()`, `dock.update()`, `dock.draw_chrome()`
|
||||
- Draw panel contents by matching `panel_id` and setting `ui.layout = LayoutState::new(rect.x + 4.0, rect.y + 4.0)`
|
||||
|
||||
- [ ] **Step 4: Build**
|
||||
|
||||
Run: `cargo build --example editor_demo`
|
||||
Expected: compiles
|
||||
|
||||
- [ ] **Step 5: Run all tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor -- --nocapture`
|
||||
Expected: all tests PASS
|
||||
|
||||
- [ ] **Step 6: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/dock.rs examples/editor_demo/src/main.rs
|
||||
git commit -m "feat(editor): integrate docking into editor_demo"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: Update docs
|
||||
|
||||
**Files:**
|
||||
- Modify: `docs/STATUS.md`
|
||||
- Modify: `docs/DEFERRED.md`
|
||||
|
||||
- [ ] **Step 1: Update STATUS.md**
|
||||
|
||||
Add to Phase 8-4 section:
|
||||
```
|
||||
- voltex_editor: DockTree (binary split layout, resize, tabs)
|
||||
```
|
||||
|
||||
Update test count.
|
||||
|
||||
- [ ] **Step 2: Update DEFERRED.md**
|
||||
|
||||
```
|
||||
- ~~**도킹, 탭, 윈도우 드래그**~~ ✅ DockTree (이진 분할, 리사이즈, 탭 전환) 완료. 플로팅 윈도우/탭 드래그 이동 미구현.
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add docs/STATUS.md docs/DEFERRED.md
|
||||
git commit -m "docs: update STATUS.md and DEFERRED.md with docking system"
|
||||
```
|
||||
553
docs/superpowers/plans/2026-03-26-entity-inspector.md
Normal file
553
docs/superpowers/plans/2026-03-26-entity-inspector.md
Normal file
@@ -0,0 +1,553 @@
|
||||
# Entity Inspector Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add entity hierarchy tree and property inspector panels to the editor, supporting Transform/Tag/Parent component display and editing.
|
||||
|
||||
**Architecture:** Two public functions (`hierarchy_panel`, `inspector_panel`) in a new `inspector.rs` module that take `UiContext` + `World` and draw IMGUI widgets. No new structs needed. Copy-out pattern for Transform editing to avoid borrow conflicts. Tag editing via caller-owned staging buffer.
|
||||
|
||||
**Tech Stack:** Rust, voltex_ecs (World, Entity, Transform, Tag, Parent, Children, roots), voltex_editor (UiContext, Rect, LayoutState, widgets)
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-03-26-entity-inspector-design.md`
|
||||
|
||||
---
|
||||
|
||||
### Task 1: hierarchy_panel + tests
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/inspector.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
- Modify: `crates/voltex_editor/Cargo.toml`
|
||||
|
||||
- [ ] **Step 1: Add voltex_ecs dependency**
|
||||
|
||||
In `crates/voltex_editor/Cargo.toml`, add:
|
||||
```toml
|
||||
voltex_ecs.workspace = true
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Write failing tests**
|
||||
|
||||
Create `crates/voltex_editor/src/inspector.rs`:
|
||||
|
||||
```rust
|
||||
use crate::ui_context::UiContext;
|
||||
use crate::dock::Rect;
|
||||
use crate::layout::LayoutState;
|
||||
use voltex_ecs::world::World;
|
||||
use voltex_ecs::entity::Entity;
|
||||
use voltex_ecs::transform::Transform;
|
||||
use voltex_ecs::scene::Tag;
|
||||
use voltex_ecs::hierarchy::{roots, Children, Parent};
|
||||
|
||||
const COLOR_SELECTED: [u8; 4] = [0x44, 0x66, 0x88, 0xFF];
|
||||
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
|
||||
const LINE_HEIGHT: f32 = 16.0;
|
||||
const INDENT: f32 = 16.0;
|
||||
const PADDING: f32 = 4.0;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use voltex_math::Vec3;
|
||||
|
||||
fn make_test_world() -> (World, Entity, Entity, Entity) {
|
||||
let mut world = World::new();
|
||||
let e1 = world.spawn();
|
||||
world.add(e1, Transform::from_position(Vec3::new(0.0, 0.0, 0.0)));
|
||||
world.add(e1, Tag("Root".to_string()));
|
||||
|
||||
let e2 = world.spawn();
|
||||
world.add(e2, Transform::from_position(Vec3::new(1.0, 0.0, 0.0)));
|
||||
world.add(e2, Tag("Child1".to_string()));
|
||||
|
||||
let e3 = world.spawn();
|
||||
world.add(e3, Transform::from_position(Vec3::new(2.0, 0.0, 0.0)));
|
||||
world.add(e3, Tag("Child2".to_string()));
|
||||
|
||||
// e2, e3 are children of e1
|
||||
voltex_ecs::hierarchy::add_child(&mut world, e1, e2);
|
||||
voltex_ecs::hierarchy::add_child(&mut world, e1, e3);
|
||||
|
||||
(world, e1, e2, e3)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_nodes() {
|
||||
let (world, _, _, _) = make_test_world();
|
||||
let root_entities = roots(&world);
|
||||
let count: usize = root_entities.iter().map(|e| count_entity_nodes(&world, *e)).sum();
|
||||
assert_eq!(count, 3); // Root + Child1 + Child2
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hierarchy_panel_draws_commands() {
|
||||
let (world, _, _, _) = make_test_world();
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let mut selected: Option<Entity> = None;
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
hierarchy_panel(&mut ui, &world, &mut selected, &rect);
|
||||
ui.end_frame();
|
||||
|
||||
assert!(ui.draw_list.commands.len() > 0, "should draw entity names");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hierarchy_panel_click_selects() {
|
||||
let (world, e1, _, _) = make_test_world();
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let mut selected: Option<Entity> = None;
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
|
||||
|
||||
// Frame 1: mouse down on first entity row (y ~= PADDING, within LINE_HEIGHT)
|
||||
ui.begin_frame(50.0, PADDING + 2.0, true);
|
||||
hierarchy_panel(&mut ui, &world, &mut selected, &rect);
|
||||
ui.end_frame();
|
||||
|
||||
assert_eq!(selected, Some(e1), "clicking first row should select root entity");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hierarchy_panel_empty_world() {
|
||||
let world = World::new();
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let mut selected: Option<Entity> = None;
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
hierarchy_panel(&mut ui, &world, &mut selected, &rect);
|
||||
ui.end_frame();
|
||||
|
||||
assert!(selected.is_none());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib inspector`
|
||||
Expected: FAIL (functions don't exist)
|
||||
|
||||
- [ ] **Step 4: Implement hierarchy_panel**
|
||||
|
||||
Add implementation above tests in `inspector.rs`:
|
||||
|
||||
```rust
|
||||
/// Count total nodes in the subtree rooted at entity (including self).
|
||||
fn count_entity_nodes(world: &World, entity: Entity) -> usize {
|
||||
let mut count = 1;
|
||||
if let Some(children) = world.get::<Children>(entity) {
|
||||
for &child in &children.0 {
|
||||
count += count_entity_nodes(world, child);
|
||||
}
|
||||
}
|
||||
count
|
||||
}
|
||||
|
||||
/// Draw a single entity row in the hierarchy tree.
|
||||
fn draw_entity_node(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
entity: Entity,
|
||||
depth: usize,
|
||||
selected: &mut Option<Entity>,
|
||||
base_x: f32,
|
||||
row_w: f32,
|
||||
) {
|
||||
let y = ui.layout.cursor_y;
|
||||
let x = base_x + PADDING + depth as f32 * INDENT;
|
||||
|
||||
// Highlight selected
|
||||
if *selected == Some(entity) {
|
||||
ui.draw_list.add_rect(base_x, y, row_w, LINE_HEIGHT, COLOR_SELECTED);
|
||||
}
|
||||
|
||||
// Click detection
|
||||
if ui.mouse_clicked && ui.mouse_in_rect(base_x, y, row_w, LINE_HEIGHT) {
|
||||
*selected = Some(entity);
|
||||
}
|
||||
|
||||
// Build display text
|
||||
let has_children = world.get::<Children>(entity).map_or(false, |c| !c.0.is_empty());
|
||||
let prefix = if has_children { "> " } else { " " };
|
||||
let name = if let Some(tag) = world.get::<Tag>(entity) {
|
||||
format!("{}{}", prefix, tag.0)
|
||||
} else {
|
||||
format!("{}Entity({})", prefix, entity.id)
|
||||
};
|
||||
|
||||
// Draw text
|
||||
let gw = ui.font.glyph_width as f32;
|
||||
let gh = ui.font.glyph_height as f32;
|
||||
let text_y = y + (LINE_HEIGHT - gh) * 0.5;
|
||||
let mut cx = x;
|
||||
for ch in name.chars() {
|
||||
let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
|
||||
ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
|
||||
cx += gw;
|
||||
}
|
||||
|
||||
ui.layout.cursor_y += LINE_HEIGHT;
|
||||
|
||||
// Recurse into children
|
||||
if let Some(children) = world.get::<Children>(entity) {
|
||||
let child_list: Vec<Entity> = children.0.clone();
|
||||
for child in child_list {
|
||||
draw_entity_node(ui, world, child, depth + 1, selected, base_x, row_w);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Draw the hierarchy panel showing entity tree.
|
||||
pub fn hierarchy_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
selected: &mut Option<Entity>,
|
||||
rect: &Rect,
|
||||
) {
|
||||
let root_entities = roots(world);
|
||||
|
||||
// Count total nodes for scroll content height
|
||||
let total_nodes: usize = root_entities.iter().map(|e| count_entity_nodes(world, *e)).sum();
|
||||
let content_height = total_nodes as f32 * LINE_HEIGHT + PADDING * 2.0;
|
||||
|
||||
ui.begin_scroll_panel(
|
||||
9000, // unique scroll panel ID
|
||||
rect.x, rect.y, rect.w, rect.h,
|
||||
content_height,
|
||||
);
|
||||
|
||||
ui.layout.cursor_y = rect.y + PADDING - ui.scroll_offsets.get(&9000).copied().unwrap_or(0.0);
|
||||
|
||||
for &entity in &root_entities {
|
||||
draw_entity_node(ui, world, entity, 0, selected, rect.x, rect.w);
|
||||
}
|
||||
|
||||
ui.end_scroll_panel();
|
||||
}
|
||||
```
|
||||
|
||||
Note: The scroll-panel version of `hierarchy_panel` above is **superseded — do not implement it**. For v1, skip scrolling entirely (the spec mentions scroll support, but with only a handful of entities it adds complexity for no benefit; add it in a follow-up task if needed). Instead, set `ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING)` and draw rows directly. The simplified implementation below is the authoritative one for this task:
|
||||
|
||||
```rust
|
||||
pub fn hierarchy_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
selected: &mut Option<Entity>,
|
||||
rect: &Rect,
|
||||
) {
|
||||
let root_entities = roots(world);
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
|
||||
for &entity in &root_entities {
|
||||
draw_entity_node(ui, world, entity, 0, selected, rect.x, rect.w);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod inspector;
|
||||
pub use inspector::{hierarchy_panel, inspector_panel};
|
||||
```
|
||||
|
||||
(inspector_panel will be a stub for now — `pub fn inspector_panel(...) {}`)
|
||||
|
||||
- [ ] **Step 6: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib inspector -- --nocapture`
|
||||
Expected: all 4 tests PASS
|
||||
|
||||
- [ ] **Step 7: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/Cargo.toml crates/voltex_editor/src/inspector.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add hierarchy_panel with entity tree display and selection"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: inspector_panel + tests
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/inspector.rs`
|
||||
|
||||
- [ ] **Step 1: Write failing tests**
|
||||
|
||||
Add to `mod tests` in inspector.rs:
|
||||
|
||||
```rust
|
||||
#[test]
|
||||
fn test_inspector_panel_no_selection() {
|
||||
let mut world = World::new();
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
let mut tag_buf = String::new();
|
||||
inspector_panel(&mut ui, &mut world, None, &rect, &mut tag_buf);
|
||||
ui.end_frame();
|
||||
|
||||
// Should have drawn "No entity selected" text
|
||||
assert!(ui.draw_list.commands.len() > 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inspector_panel_with_transform() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
|
||||
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
let mut tag_buf = String::new();
|
||||
inspector_panel(&mut ui, &mut world, Some(e), &rect, &mut tag_buf);
|
||||
ui.end_frame();
|
||||
|
||||
// Should have drawn Transform header + sliders
|
||||
// Sliders produce multiple draw commands (bg + handle + label text)
|
||||
assert!(ui.draw_list.commands.len() > 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inspector_panel_with_tag() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn();
|
||||
world.add(e, Transform::new());
|
||||
world.add(e, Tag("TestTag".to_string()));
|
||||
|
||||
let mut ui = UiContext::new(800.0, 600.0);
|
||||
let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
|
||||
|
||||
ui.begin_frame(0.0, 0.0, false);
|
||||
let mut tag_buf = String::new();
|
||||
inspector_panel(&mut ui, &mut world, Some(e), &rect, &mut tag_buf);
|
||||
ui.end_frame();
|
||||
|
||||
// tag_buf should be synced to "TestTag"
|
||||
assert_eq!(tag_buf, "TestTag");
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib inspector`
|
||||
Expected: FAIL (inspector_panel is stub)
|
||||
|
||||
- [ ] **Step 3: Implement inspector_panel**
|
||||
|
||||
Replace the stub with:
|
||||
|
||||
```rust
|
||||
/// Draw the inspector panel for the selected entity.
|
||||
/// `tag_buffer` is a caller-owned staging buffer for Tag editing.
|
||||
pub fn inspector_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &mut World,
|
||||
selected: Option<Entity>,
|
||||
rect: &Rect,
|
||||
tag_buffer: &mut String,
|
||||
) {
|
||||
ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
|
||||
|
||||
let entity = match selected {
|
||||
Some(e) => e,
|
||||
None => {
|
||||
ui.text("No entity selected");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// --- Transform ---
|
||||
if world.has_component::<Transform>(entity) {
|
||||
ui.text("-- Transform --");
|
||||
|
||||
// Copy out values (immutable borrow ends here)
|
||||
let (px, py, pz, rx, ry, rz, sx, sy, sz) = {
|
||||
let t = world.get::<Transform>(entity).unwrap();
|
||||
(t.position.x, t.position.y, t.position.z,
|
||||
t.rotation.x, t.rotation.y, t.rotation.z,
|
||||
t.scale.x, t.scale.y, t.scale.z)
|
||||
};
|
||||
|
||||
// Sliders (no world borrow)
|
||||
let new_px = ui.slider("Pos X", px, -50.0, 50.0);
|
||||
let new_py = ui.slider("Pos Y", py, -50.0, 50.0);
|
||||
let new_pz = ui.slider("Pos Z", pz, -50.0, 50.0);
|
||||
let new_rx = ui.slider("Rot X", rx, -3.15, 3.15);
|
||||
let new_ry = ui.slider("Rot Y", ry, -3.15, 3.15);
|
||||
let new_rz = ui.slider("Rot Z", rz, -3.15, 3.15);
|
||||
let new_sx = ui.slider("Scl X", sx, 0.01, 10.0);
|
||||
let new_sy = ui.slider("Scl Y", sy, 0.01, 10.0);
|
||||
let new_sz = ui.slider("Scl Z", sz, 0.01, 10.0);
|
||||
|
||||
// Write back (mutable borrow)
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position.x = new_px;
|
||||
t.position.y = new_py;
|
||||
t.position.z = new_pz;
|
||||
t.rotation.x = new_rx;
|
||||
t.rotation.y = new_ry;
|
||||
t.rotation.z = new_rz;
|
||||
t.scale.x = new_sx;
|
||||
t.scale.y = new_sy;
|
||||
t.scale.z = new_sz;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Tag ---
|
||||
if world.has_component::<Tag>(entity) {
|
||||
ui.text("-- Tag --");
|
||||
|
||||
// Sync buffer from world if it doesn't match
|
||||
if let Some(tag) = world.get::<Tag>(entity) {
|
||||
if tag_buffer.is_empty() || *tag_buffer != tag.0 {
|
||||
*tag_buffer = tag.0.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let input_x = rect.x + PADDING;
|
||||
let input_y = ui.layout.cursor_y;
|
||||
let input_w = rect.w - PADDING * 2.0;
|
||||
if ui.text_input(8888, tag_buffer, input_x, input_y, input_w) {
|
||||
// Buffer changed — write back to world
|
||||
if let Some(tag) = world.get_mut::<Tag>(entity) {
|
||||
tag.0 = tag_buffer.clone();
|
||||
}
|
||||
}
|
||||
ui.layout.advance_line();
|
||||
}
|
||||
|
||||
// --- Parent ---
|
||||
if let Some(parent) = world.get::<Parent>(entity) {
|
||||
ui.text("-- Parent --");
|
||||
let parent_text = format!("Parent: Entity({})", parent.0.id);
|
||||
ui.text(&parent_text);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Update the function signature in lib.rs re-export**
|
||||
|
||||
Verify that the existing `pub use inspector::{hierarchy_panel, inspector_panel};` re-export in `lib.rs` still compiles now that the `inspector_panel` stub has been replaced. (A `pub use` re-exports the item by name regardless of its signature, so no edit should be needed — this step is a compile-time sanity check, not a code change.)
|
||||
|
||||
- [ ] **Step 5: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib inspector -- --nocapture`
|
||||
Expected: all 7 tests PASS (4 hierarchy + 3 inspector)
|
||||
|
||||
- [ ] **Step 6: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/inspector.rs
|
||||
git commit -m "feat(editor): add inspector_panel with Transform, Tag, Parent editing"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Integrate into editor_demo
|
||||
|
||||
**Files:**
|
||||
- Modify: `examples/editor_demo/src/main.rs`
|
||||
|
||||
- [ ] **Step 1: Add ECS World and demo entities**
|
||||
|
||||
Read current `examples/editor_demo/src/main.rs`. Add to AppState:
|
||||
```rust
|
||||
world: World,
|
||||
selected_entity: Option<Entity>,
|
||||
tag_buffer: String,
|
||||
```
|
||||
|
||||
In `resumed`, after scene mesh creation, create ECS entities that mirror the scene:
|
||||
```rust
|
||||
let mut world = World::new();
|
||||
let ground = world.spawn();
|
||||
world.add(ground, Transform::from_position(Vec3::new(0.0, 0.0, 0.0)));
|
||||
world.add(ground, Tag("Ground".to_string()));
|
||||
|
||||
let cube1 = world.spawn();
|
||||
world.add(cube1, Transform::from_position(Vec3::new(0.0, 0.5, 0.0)));
|
||||
world.add(cube1, Tag("Cube1".to_string()));
|
||||
|
||||
let cube2 = world.spawn();
|
||||
world.add(cube2, Transform::from_position(Vec3::new(2.0, 0.5, 1.0)));
|
||||
world.add(cube2, Tag("Cube2".to_string()));
|
||||
|
||||
let cube3 = world.spawn();
|
||||
world.add(cube3, Transform::from_position(Vec3::new(-1.5, 0.5, -1.0)));
|
||||
world.add(cube3, Tag("Cube3".to_string()));
|
||||
|
||||
// Cube2 is child of Cube1
|
||||
voltex_ecs::hierarchy::add_child(&mut world, cube1, cube2);
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Wire panels to hierarchy/inspector**
|
||||
|
||||
In the dock panel loop, replace panel 0 (Debug) with Hierarchy, and panel 2 (Properties) with Inspector:
|
||||
```rust
|
||||
0 => {
|
||||
// Hierarchy panel
|
||||
hierarchy_panel(&mut state.ui, &state.world, &mut state.selected_entity, rect);
|
||||
}
|
||||
// ... panel 1 stays as viewport ...
|
||||
2 => {
|
||||
// Inspector panel
|
||||
inspector_panel(&mut state.ui, &mut state.world, state.selected_entity, rect, &mut state.tag_buffer);
|
||||
}
|
||||
```
|
||||
|
||||
Update dock panel names to `["Hierarchy", "Viewport", "Inspector", "Console"]`.
|
||||
|
||||
- [ ] **Step 3: Build and test**
|
||||
|
||||
Run: `cargo build -p editor_demo`
|
||||
Expected: compiles
|
||||
|
||||
Run: `cargo test -p voltex_editor -- --nocapture`
|
||||
Expected: all tests pass
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add examples/editor_demo/src/main.rs
|
||||
git commit -m "feat(editor): integrate hierarchy and inspector panels into editor_demo"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Update docs
|
||||
|
||||
**Files:**
|
||||
- Modify: `docs/STATUS.md`
|
||||
- Modify: `docs/DEFERRED.md`
|
||||
|
||||
- [ ] **Step 1: Update STATUS.md**
|
||||
|
||||
Add to Phase 8-4:
|
||||
```
|
||||
- voltex_editor: hierarchy_panel, inspector_panel (Transform/Tag/Parent editing)
|
||||
```
|
||||
Update test count.
|
||||
|
||||
- [ ] **Step 2: Update DEFERRED.md**
|
||||
|
||||
```
|
||||
- ~~**엔티티 인스펙터**~~ ✅ Transform/Tag/Parent 편집 완료. 커스텀 컴포넌트 리플렉션 미구현.
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add docs/STATUS.md docs/DEFERRED.md
|
||||
git commit -m "docs: update STATUS.md and DEFERRED.md with entity inspector"
|
||||
```
|
||||
773
docs/superpowers/plans/2026-03-26-scene-viewport.md
Normal file
773
docs/superpowers/plans/2026-03-26-scene-viewport.md
Normal file
@@ -0,0 +1,773 @@
|
||||
# Scene Viewport Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Embed a 3D scene viewport inside the editor's docking panel with orbit camera controls and Blinn-Phong rendering.
|
||||
|
||||
**Architecture:** Render 3D scene to an offscreen texture (Rgba8Unorm + Depth32Float), then blit that texture to the surface as a textured quad within the viewport panel's rect. OrbitCamera provides view/projection. Reuse existing mesh_shader.wgsl pipeline for 3D rendering.
|
||||
|
||||
**Tech Stack:** Rust, wgpu 28.0, voltex_math (Vec3/Mat4), voltex_renderer (Mesh/MeshVertex/CameraUniform/LightUniform/pipeline), voltex_editor (docking/IMGUI)
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-03-26-scene-viewport-design.md`
|
||||
|
||||
---
|
||||
|
||||
### Task 1: OrbitCamera (pure math, no GPU)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/orbit_camera.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
- Modify: `crates/voltex_editor/Cargo.toml`
|
||||
|
||||
- [ ] **Step 1: Add voltex_math dependency**
|
||||
|
||||
In `crates/voltex_editor/Cargo.toml`, add under `[dependencies]`:
|
||||
```toml
|
||||
voltex_math.workspace = true
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Write failing tests**
|
||||
|
||||
Create `crates/voltex_editor/src/orbit_camera.rs` with tests:
|
||||
|
||||
```rust
|
||||
use voltex_math::{Vec3, Mat4};
|
||||
|
||||
use std::f32::consts::PI;
|
||||
|
||||
const PITCH_LIMIT: f32 = PI / 2.0 - 0.01;
|
||||
const MIN_DISTANCE: f32 = 0.5;
|
||||
const MAX_DISTANCE: f32 = 50.0;
|
||||
const ORBIT_SENSITIVITY: f32 = 0.005;
|
||||
const ZOOM_FACTOR: f32 = 0.1;
|
||||
const PAN_SENSITIVITY: f32 = 0.01;
|
||||
|
||||
pub struct OrbitCamera {
|
||||
pub target: Vec3,
|
||||
pub distance: f32,
|
||||
pub yaw: f32,
|
||||
pub pitch: f32,
|
||||
pub fov_y: f32,
|
||||
pub near: f32,
|
||||
pub far: f32,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_default_position() {
|
||||
let cam = OrbitCamera::new();
|
||||
let pos = cam.position();
|
||||
// Default: yaw=0, pitch=0.3, distance=5
|
||||
// pos = target + (d*cos(p)*sin(y), d*sin(p), d*cos(p)*cos(y))
|
||||
// yaw=0 → sin=0, cos=1 → x=0, z=d*cos(p)
|
||||
assert!((pos.x).abs() < 1e-3);
|
||||
assert!(pos.y > 0.0); // pitch > 0 → y > 0
|
||||
assert!(pos.z > 0.0); // cos(0)*cos(p) > 0
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orbit_changes_yaw_pitch() {
|
||||
let mut cam = OrbitCamera::new();
|
||||
let old_yaw = cam.yaw;
|
||||
let old_pitch = cam.pitch;
|
||||
cam.orbit(100.0, 50.0);
|
||||
assert!((cam.yaw - old_yaw - 100.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
|
||||
assert!((cam.pitch - old_pitch - 50.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pitch_clamped() {
|
||||
let mut cam = OrbitCamera::new();
|
||||
cam.orbit(0.0, 100000.0); // huge pitch
|
||||
assert!(cam.pitch <= PITCH_LIMIT);
|
||||
cam.orbit(0.0, -200000.0); // huge negative
|
||||
assert!(cam.pitch >= -PITCH_LIMIT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zoom_changes_distance() {
|
||||
let mut cam = OrbitCamera::new();
|
||||
let d0 = cam.distance;
|
||||
cam.zoom(1.0); // scroll up → zoom in
|
||||
assert!(cam.distance < d0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zoom_clamped() {
|
||||
let mut cam = OrbitCamera::new();
|
||||
cam.zoom(1000.0); // massive zoom in
|
||||
assert!(cam.distance >= MIN_DISTANCE);
|
||||
cam.zoom(-10000.0); // massive zoom out
|
||||
assert!(cam.distance <= MAX_DISTANCE);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pan_moves_target() {
|
||||
let mut cam = OrbitCamera::new();
|
||||
let t0 = cam.target;
|
||||
cam.pan(10.0, 0.0); // pan right
|
||||
assert!((cam.target.x - t0.x).abs() > 1e-4 || (cam.target.z - t0.z).abs() > 1e-4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_view_matrix_not_zero() {
|
||||
let cam = OrbitCamera::new();
|
||||
let v = cam.view_matrix();
|
||||
// At least some elements should be non-zero
|
||||
let sum: f32 = v.cols.iter().flat_map(|c| c.iter()).map(|x| x.abs()).sum();
|
||||
assert!(sum > 1.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_projection_matrix() {
|
||||
let cam = OrbitCamera::new();
|
||||
let p = cam.projection_matrix(16.0 / 9.0);
|
||||
// Element [0][0] should be related to fov and aspect
|
||||
assert!(p.cols[0][0] > 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_view_projection() {
|
||||
let cam = OrbitCamera::new();
|
||||
let vp = cam.view_projection(1.0);
|
||||
let v = cam.view_matrix();
|
||||
let p = cam.projection_matrix(1.0);
|
||||
let expected = p.mul_mat4(&v);
|
||||
for i in 0..4 {
|
||||
for j in 0..4 {
|
||||
assert!((vp.cols[i][j] - expected.cols[i][j]).abs() < 1e-4,
|
||||
"mismatch at [{i}][{j}]: {} vs {}", vp.cols[i][j], expected.cols[i][j]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Run tests to verify they fail**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib orbit_camera`
|
||||
Expected: FAIL (methods don't exist)
|
||||
|
||||
- [ ] **Step 4: Implement OrbitCamera**
|
||||
|
||||
```rust
|
||||
impl OrbitCamera {
|
||||
pub fn new() -> Self {
|
||||
OrbitCamera {
|
||||
target: Vec3::ZERO,
|
||||
distance: 5.0,
|
||||
yaw: 0.0,
|
||||
pitch: 0.3,
|
||||
fov_y: PI / 4.0,
|
||||
near: 0.1,
|
||||
far: 100.0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn position(&self) -> Vec3 {
|
||||
let cp = self.pitch.cos();
|
||||
let sp = self.pitch.sin();
|
||||
let cy = self.yaw.cos();
|
||||
let sy = self.yaw.sin();
|
||||
Vec3::new(
|
||||
self.target.x + self.distance * cp * sy,
|
||||
self.target.y + self.distance * sp,
|
||||
self.target.z + self.distance * cp * cy,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn orbit(&mut self, dx: f32, dy: f32) {
|
||||
self.yaw += dx * ORBIT_SENSITIVITY;
|
||||
self.pitch += dy * ORBIT_SENSITIVITY;
|
||||
self.pitch = self.pitch.clamp(-PITCH_LIMIT, PITCH_LIMIT);
|
||||
}
|
||||
|
||||
pub fn zoom(&mut self, delta: f32) {
|
||||
self.distance *= 1.0 - delta * ZOOM_FACTOR;
|
||||
self.distance = self.distance.clamp(MIN_DISTANCE, MAX_DISTANCE);
|
||||
}
|
||||
|
||||
pub fn pan(&mut self, dx: f32, dy: f32) {
|
||||
let forward = (self.target - self.position()).normalize();
|
||||
let right = forward.cross(Vec3::Y);
|
||||
let right = if right.length() < 1e-4 { Vec3::X } else { right.normalize() };
|
||||
let up = right.cross(forward).normalize();
|
||||
let offset_x = right * (-dx * PAN_SENSITIVITY * self.distance);
|
||||
let offset_y = up * (dy * PAN_SENSITIVITY * self.distance);
|
||||
self.target = self.target + offset_x + offset_y;
|
||||
}
|
||||
|
||||
pub fn view_matrix(&self) -> Mat4 {
|
||||
Mat4::look_at(self.position(), self.target, Vec3::Y)
|
||||
}
|
||||
|
||||
pub fn projection_matrix(&self, aspect: f32) -> Mat4 {
|
||||
Mat4::perspective(self.fov_y, aspect, self.near, self.far)
|
||||
}
|
||||
|
||||
pub fn view_projection(&self, aspect: f32) -> Mat4 {
|
||||
self.projection_matrix(aspect).mul_mat4(&self.view_matrix())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod orbit_camera;
|
||||
pub use orbit_camera::OrbitCamera;
|
||||
```
|
||||
|
||||
- [ ] **Step 6: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib orbit_camera -- --nocapture`
|
||||
Expected: all 9 tests PASS
|
||||
|
||||
- [ ] **Step 7: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/Cargo.toml crates/voltex_editor/src/orbit_camera.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add OrbitCamera with orbit, zoom, pan controls"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: ViewportTexture (offscreen render target)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/viewport_texture.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Implement ViewportTexture**
|
||||
|
||||
Create `crates/voltex_editor/src/viewport_texture.rs`:
|
||||
|
||||
```rust
|
||||
pub const VIEWPORT_COLOR_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;
|
||||
pub const VIEWPORT_DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
|
||||
|
||||
pub struct ViewportTexture {
|
||||
pub color_texture: wgpu::Texture,
|
||||
pub color_view: wgpu::TextureView,
|
||||
pub depth_texture: wgpu::Texture,
|
||||
pub depth_view: wgpu::TextureView,
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
}
|
||||
|
||||
impl ViewportTexture {
|
||||
pub fn new(device: &wgpu::Device, width: u32, height: u32) -> Self {
|
||||
let w = width.max(1);
|
||||
let h = height.max(1);
|
||||
|
||||
let color_texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("Viewport Color"),
|
||||
size: wgpu::Extent3d { width: w, height: h, depth_or_array_layers: 1 },
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: VIEWPORT_COLOR_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
|
||||
view_formats: &[],
|
||||
});
|
||||
let color_view = color_texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
|
||||
let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("Viewport Depth"),
|
||||
size: wgpu::Extent3d { width: w, height: h, depth_or_array_layers: 1 },
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: VIEWPORT_DEPTH_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
view_formats: &[],
|
||||
});
|
||||
let depth_view = depth_texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
|
||||
ViewportTexture { color_texture, color_view, depth_texture, depth_view, width: w, height: h }
|
||||
}
|
||||
|
||||
/// Recreate textures if size changed. Returns true if recreated.
|
||||
pub fn ensure_size(&mut self, device: &wgpu::Device, width: u32, height: u32) -> bool {
|
||||
let w = width.max(1);
|
||||
let h = height.max(1);
|
||||
if w == self.width && h == self.height {
|
||||
return false;
|
||||
}
|
||||
*self = Self::new(device, w, h);
|
||||
true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod viewport_texture;
|
||||
pub use viewport_texture::ViewportTexture;
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Build and verify**
|
||||
|
||||
Run: `cargo build -p voltex_editor`
|
||||
Expected: compiles (no GPU tests possible, but struct/logic is trivial)
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/viewport_texture.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add ViewportTexture offscreen render target"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: ViewportRenderer (blit shader + pipeline)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/viewport_renderer.rs`
|
||||
- Create: `crates/voltex_editor/src/viewport_shader.wgsl`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write the WGSL shader**
|
||||
|
||||
Create `crates/voltex_editor/src/viewport_shader.wgsl`:
|
||||
|
||||
```wgsl
|
||||
struct RectUniform {
|
||||
rect: vec4<f32>, // x, y, w, h in pixels
|
||||
screen: vec2<f32>, // screen_w, screen_h
|
||||
_pad: vec2<f32>,
|
||||
};
|
||||
|
||||
@group(0) @binding(0) var<uniform> u: RectUniform;
|
||||
@group(0) @binding(1) var t_viewport: texture_2d<f32>;
|
||||
@group(0) @binding(2) var s_viewport: sampler;
|
||||
|
||||
struct VertexOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(0) uv: vec2<f32>,
|
||||
};
|
||||
|
||||
@vertex
|
||||
fn vs_main(@builtin(vertex_index) idx: u32) -> VertexOutput {
|
||||
// 6 vertices for 2 triangles (a quad covering the panel rect)
|
||||
var positions = array<vec2<f32>, 6>(
|
||||
vec2<f32>(0.0, 0.0), // top-left
|
||||
vec2<f32>(1.0, 0.0), // top-right
|
||||
vec2<f32>(1.0, 1.0), // bottom-right
|
||||
vec2<f32>(0.0, 0.0), // top-left
|
||||
vec2<f32>(1.0, 1.0), // bottom-right
|
||||
vec2<f32>(0.0, 1.0), // bottom-left
|
||||
);
|
||||
|
||||
let p = positions[idx];
|
||||
// Convert pixel rect to NDC
|
||||
let px = u.rect.x + p.x * u.rect.z;
|
||||
let py = u.rect.y + p.y * u.rect.w;
|
||||
let ndc_x = (px / u.screen.x) * 2.0 - 1.0;
|
||||
let ndc_y = 1.0 - (py / u.screen.y) * 2.0; // Y flipped
|
||||
|
||||
var out: VertexOutput;
|
||||
out.position = vec4<f32>(ndc_x, ndc_y, 0.0, 1.0);
|
||||
out.uv = p;
|
||||
return out;
|
||||
}
|
||||
|
||||
@fragment
|
||||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
return textureSample(t_viewport, s_viewport, in.uv);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Implement ViewportRenderer**
|
||||
|
||||
Create `crates/voltex_editor/src/viewport_renderer.rs`:
|
||||
|
||||
```rust
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Pod, Zeroable)]
|
||||
struct RectUniform {
|
||||
rect: [f32; 4], // x, y, w, h
|
||||
screen: [f32; 2], // screen_w, screen_h
|
||||
_pad: [f32; 2],
|
||||
}
|
||||
|
||||
pub struct ViewportRenderer {
|
||||
pipeline: wgpu::RenderPipeline,
|
||||
bind_group_layout: wgpu::BindGroupLayout,
|
||||
sampler: wgpu::Sampler,
|
||||
uniform_buffer: wgpu::Buffer,
|
||||
}
|
||||
|
||||
impl ViewportRenderer {
|
||||
pub fn new(device: &wgpu::Device, surface_format: wgpu::TextureFormat) -> Self {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("Viewport Blit Shader"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("viewport_shader.wgsl").into()),
|
||||
});
|
||||
|
||||
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("Viewport Blit BGL"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("Viewport Blit PL"),
|
||||
bind_group_layouts: &[&bind_group_layout],
|
||||
immediate_size: 0,
|
||||
});
|
||||
|
||||
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Viewport Blit Pipeline"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: Some("vs_main"),
|
||||
buffers: &[], // no vertex buffer
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(),
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: Some("fs_main"),
|
||||
targets: &[Some(wgpu::ColorTargetState {
|
||||
format: surface_format,
|
||||
blend: Some(wgpu::BlendState::REPLACE),
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
})],
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(),
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
topology: wgpu::PrimitiveTopology::TriangleList,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: None,
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview_mask: None,
|
||||
cache: None,
|
||||
});
|
||||
|
||||
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
|
||||
label: Some("Viewport Sampler"),
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Viewport Rect Uniform"),
|
||||
size: std::mem::size_of::<RectUniform>() as u64,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
ViewportRenderer { pipeline, bind_group_layout, sampler, uniform_buffer }
|
||||
}
|
||||
|
||||
pub fn render(
|
||||
&self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
target_view: &wgpu::TextureView,
|
||||
viewport_color_view: &wgpu::TextureView,
|
||||
screen_w: f32,
|
||||
screen_h: f32,
|
||||
rect_x: f32,
|
||||
rect_y: f32,
|
||||
rect_w: f32,
|
||||
rect_h: f32,
|
||||
) {
|
||||
let uniform = RectUniform {
|
||||
rect: [rect_x, rect_y, rect_w, rect_h],
|
||||
screen: [screen_w, screen_h],
|
||||
_pad: [0.0; 2],
|
||||
};
|
||||
queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniform]));
|
||||
|
||||
// Create bind group per frame (texture may change on resize)
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Viewport Blit BG"),
|
||||
layout: &self.bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: self.uniform_buffer.as_entire_binding() },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(viewport_color_view) },
|
||||
wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::Sampler(&self.sampler) },
|
||||
],
|
||||
});
|
||||
|
||||
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("Viewport Blit Pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: target_view,
|
||||
resolve_target: None,
|
||||
depth_slice: None,
|
||||
ops: wgpu::Operations { load: wgpu::LoadOp::Load, store: wgpu::StoreOp::Store },
|
||||
})],
|
||||
depth_stencil_attachment: None,
|
||||
occlusion_query_set: None,
|
||||
timestamp_writes: None,
|
||||
multiview_mask: None,
|
||||
});
|
||||
|
||||
// Scissor to panel area
|
||||
rpass.set_scissor_rect(rect_x as u32, rect_y as u32, rect_w.ceil() as u32, rect_h.ceil() as u32);
|
||||
rpass.set_pipeline(&self.pipeline);
|
||||
rpass.set_bind_group(0, &bind_group, &[]);
|
||||
rpass.draw(0..6, 0..1);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod viewport_renderer;
|
||||
pub use viewport_renderer::ViewportRenderer;
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Build**
|
||||
|
||||
Run: `cargo build -p voltex_editor`
|
||||
Expected: compiles
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/src/viewport_shader.wgsl crates/voltex_editor/src/viewport_renderer.rs crates/voltex_editor/src/lib.rs
|
||||
git commit -m "feat(editor): add ViewportRenderer blit pipeline and shader"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Integrate into editor_demo
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/Cargo.toml`
|
||||
- Modify: `examples/editor_demo/src/main.rs`
|
||||
|
||||
This is the largest task. The editor_demo needs:
|
||||
- voltex_renderer dependency in voltex_editor for Mesh/MeshVertex/CameraUniform/LightUniform/pipeline
|
||||
- A simple 3D scene (cubes + ground + light)
|
||||
- Render scene to ViewportTexture, blit to surface, orbit camera input
|
||||
|
||||
- [ ] **Step 1: Add voltex_renderer dependency**
|
||||
|
||||
In `crates/voltex_editor/Cargo.toml`:
|
||||
```toml
|
||||
voltex_renderer.workspace = true
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update editor_demo imports and state**
|
||||
|
||||
Read `examples/editor_demo/src/main.rs` first. Then add:
|
||||
|
||||
Imports:
|
||||
```rust
|
||||
use voltex_editor::{
|
||||
UiContext, UiRenderer, DockTree, DockNode, Axis, Rect, LayoutState,
|
||||
OrbitCamera, ViewportTexture, ViewportRenderer,
|
||||
};
|
||||
use voltex_renderer::{Mesh, MeshVertex, CameraUniform, LightUniform, GpuTexture};
|
||||
```
|
||||
|
||||
Add to `AppState`:
|
||||
```rust
|
||||
// Viewport state
|
||||
orbit_cam: OrbitCamera,
|
||||
viewport_tex: ViewportTexture,
|
||||
viewport_renderer: ViewportRenderer,
|
||||
// 3D scene resources
|
||||
scene_pipeline: wgpu::RenderPipeline,
|
||||
camera_buffer: wgpu::Buffer,
|
||||
light_buffer: wgpu::Buffer,
|
||||
camera_light_bg: wgpu::BindGroup,
|
||||
scene_meshes: Vec<(Mesh, [[f32; 4]; 4])>, // mesh + model matrix
|
||||
dummy_texture: GpuTexture,
|
||||
// Mouse tracking for orbit
|
||||
prev_mouse: (f32, f32),
|
||||
left_dragging: bool,
|
||||
middle_dragging: bool,
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Initialize 3D scene in `resumed`**
|
||||
|
||||
After UI initialization, create:
|
||||
1. OrbitCamera
|
||||
2. ViewportTexture (initial size 640x480)
|
||||
3. ViewportRenderer
|
||||
4. Camera/Light bind group layout, buffers, bind group
|
||||
5. Texture bind group layout + white 1x1 dummy texture
|
||||
6. Mesh pipeline with Rgba8Unorm target format
|
||||
7. Scene meshes: ground plane + 3 cubes (using MeshVertex)
|
||||
|
||||
The scene pipeline must use `VIEWPORT_COLOR_FORMAT` (Rgba8Unorm), not `surface_format`.
|
||||
|
||||
Helper to generate a cube mesh:
|
||||
```rust
|
||||
fn make_cube(device: &wgpu::Device) -> Mesh {
|
||||
// 24 vertices (4 per face, 6 faces), 36 indices
|
||||
// Each face: position, normal, uv=(0,0), tangent=(1,0,0,1)
|
||||
// ... standard unit cube centered at origin
|
||||
}
|
||||
|
||||
fn make_ground(device: &wgpu::Device) -> Mesh {
|
||||
// 4 vertices forming a 10x10 quad at y=0
|
||||
// normal = (0,1,0)
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Handle mouse input for orbit camera**
|
||||
|
||||
In `window_event`:
|
||||
- Track `prev_mouse` position for dx/dy calculation
|
||||
- Track `left_dragging` and `middle_dragging` state from mouse button events
|
||||
- Feed scroll delta for zoom
|
||||
|
||||
In the render loop, after getting viewport rect:
|
||||
```rust
|
||||
if rect.contains(mx, my) {
|
||||
let dx = mx - prev_mouse.0;
|
||||
let dy = my - prev_mouse.1;
|
||||
if left_dragging { orbit_cam.orbit(dx, dy); }
|
||||
if middle_dragging { orbit_cam.pan(dx, dy); }
|
||||
if scroll != 0.0 { orbit_cam.zoom(scroll); }
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Render 3D scene to viewport texture**
|
||||
|
||||
In the render loop, for viewport panel:
|
||||
```rust
|
||||
1 => {
|
||||
// Ensure viewport texture matches panel size
|
||||
viewport_tex.ensure_size(&gpu.device, rect.w as u32, rect.h as u32);
|
||||
|
||||
// Update camera uniform
|
||||
let aspect = rect.w / rect.h;
|
||||
let vp = orbit_cam.view_projection(aspect);
|
||||
let cam_pos = orbit_cam.position();
|
||||
|
||||
// Render each mesh to viewport texture
|
||||
// For each mesh: update model matrix in camera_uniform, write_buffer, draw
|
||||
{
|
||||
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: &viewport_tex.color_view,
|
||||
resolve_target: None,
|
||||
depth_slice: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.15, g: 0.15, b: 0.2, a: 1.0 }),
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: &viewport_tex.depth_view,
|
||||
depth_ops: Some(wgpu::Operations { load: wgpu::LoadOp::Clear(1.0), store: wgpu::StoreOp::Store }),
|
||||
stencil_ops: None,
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
rpass.set_pipeline(&scene_pipeline);
|
||||
rpass.set_bind_group(0, &camera_light_bg, &[]);
|
||||
rpass.set_bind_group(1, &dummy_texture.bind_group, &[]);
|
||||
|
||||
for (mesh, _model) in &scene_meshes {
|
||||
rpass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
|
||||
rpass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
|
||||
rpass.draw_indexed(0..mesh.num_indices, 0, 0..1);
|
||||
}
|
||||
}
|
||||
|
||||
// Blit viewport texture to surface
|
||||
viewport_renderer.render(
|
||||
&gpu.device, &gpu.queue, &mut encoder, &surface_view,
|
||||
&viewport_tex.color_view,
|
||||
screen_w, screen_h,
|
||||
rect.x, rect.y, rect.w, rect.h,
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Note: For multiple meshes with different model matrices, we need to update the camera_buffer between draws. Since we can't write_buffer inside a render pass, we have two options:
|
||||
- (A) Use a single model matrix (identity) and position cubes via vertex data
|
||||
- (B) Use dynamic uniform buffer offsets
|
||||
|
||||
For simplicity, option (A): pre-transform cube vertices at different positions. The model matrix in CameraUniform stays identity.
|
||||
|
||||
- [ ] **Step 6: Build and test**
|
||||
|
||||
Run: `cargo build -p editor_demo`
|
||||
Expected: compiles
|
||||
|
||||
Run: `cargo test -p voltex_editor -- --nocapture`
|
||||
Expected: all tests pass (orbit_camera + dock tests)
|
||||
|
||||
- [ ] **Step 7: Commit**
|
||||
|
||||
```bash
|
||||
git add crates/voltex_editor/Cargo.toml examples/editor_demo/src/main.rs
|
||||
git commit -m "feat(editor): integrate 3D viewport into editor_demo"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: Update docs
|
||||
|
||||
**Files:**
|
||||
- Modify: `docs/STATUS.md`
|
||||
- Modify: `docs/DEFERRED.md`
|
||||
|
||||
- [ ] **Step 1: Update STATUS.md**
|
||||
|
||||
Add to Phase 8-4:
|
||||
```
|
||||
- voltex_editor: ViewportTexture, ViewportRenderer, OrbitCamera (offscreen 3D viewport)
|
||||
```
|
||||
|
||||
Update test count.
|
||||
|
||||
- [ ] **Step 2: Update DEFERRED.md**
|
||||
|
||||
```
|
||||
- ~~**씬 뷰포트**~~ ✅ 오프스크린 렌더링 + 오빗 카메라 완료. Blinn-Phong forward만 (디퍼드/PBR 미지원).
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add docs/STATUS.md docs/DEFERRED.md
|
||||
git commit -m "docs: update STATUS.md and DEFERRED.md with scene viewport"
|
||||
```
|
||||
421
docs/superpowers/plans/2026-03-26-ttf-font.md
Normal file
421
docs/superpowers/plans/2026-03-26-ttf-font.md
Normal file
@@ -0,0 +1,421 @@
|
||||
# TTF Font Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Self-implemented TTF parser + rasterizer with on-demand glyph cache atlas for variable-width Unicode text rendering in the editor.
|
||||
|
||||
**Architecture:** 4 modules: TtfParser (binary parsing) → Rasterizer (outline→bitmap) → GlyphCache (atlas management) → TtfFont (unified interface). All pure CPU logic, no GPU dependency for core. GPU integration only in editor_demo.
|
||||
|
||||
**Tech Stack:** Rust, std only (no external crates). wgpu for atlas texture upload in editor integration.
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-03-26-ttf-font-design.md`
|
||||
|
||||
**Test font:** Tests need a real TTF file. Use a system font or download a small open-source font. For tests, embed a minimal subset or use `C:/Windows/Fonts/arial.ttf`, which is available on Windows.
|
||||
|
||||
---
|
||||
|
||||
### Task 1: TtfParser — table directory + head/hhea/maxp
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/ttf_parser.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Write tests and implement table directory parsing**
|
||||
|
||||
Create `ttf_parser.rs` with byte-reading helpers and table directory parsing:
|
||||
|
||||
```rust
|
||||
// Byte reading helpers (big-endian, TTF standard)
|
||||
fn read_u16(data: &[u8], offset: usize) -> u16
|
||||
fn read_i16(data: &[u8], offset: usize) -> i16
|
||||
fn read_u32(data: &[u8], offset: usize) -> u32
|
||||
|
||||
// Table directory entry
|
||||
struct TableRecord {
|
||||
tag: [u8; 4],
|
||||
offset: u32,
|
||||
length: u32,
|
||||
}
|
||||
|
||||
pub struct TtfParser {
|
||||
data: Vec<u8>,
|
||||
tables: HashMap<[u8; 4], TableRecord>,
|
||||
pub units_per_em: u16,
|
||||
pub num_glyphs: u16,
|
||||
pub ascender: i16,
|
||||
pub descender: i16,
|
||||
pub line_gap: i16,
|
||||
pub num_h_metrics: u16,
|
||||
pub loca_format: i16,
|
||||
}
|
||||
```
|
||||
|
||||
Parse: offset table → table directory → head (units_per_em, indexToLocFormat) → hhea (ascender, descender, lineGap, numberOfHMetrics) → maxp (numGlyphs).
|
||||
|
||||
Tests:
|
||||
- `test_parse_loads_tables`: Load a TTF file, verify tables exist (head, cmap, glyf, etc.)
|
||||
- `test_head_values`: units_per_em > 0, loca_format is 0 or 1
|
||||
- `test_hhea_values`: ascender > 0, descender < 0 (typically)
|
||||
- `test_maxp_values`: num_glyphs > 0
|
||||
|
||||
- [ ] **Step 2: Run tests**
|
||||
|
||||
Run: `cargo test -p voltex_editor --lib ttf_parser -- --nocapture`
|
||||
|
||||
- [ ] **Step 3: Add module to lib.rs**
|
||||
|
||||
```rust
|
||||
pub mod ttf_parser;
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add TTF parser with table directory and header parsing"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: TtfParser — cmap (Unicode → glyph index)
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/ttf_parser.rs`
|
||||
|
||||
- [ ] **Step 1: Implement cmap Format 4 parsing**
|
||||
|
||||
Add `glyph_index(codepoint: u32) -> u16` method:
|
||||
- Find cmap table → find Format 4 subtable (platformID=3, encodingID=1 or platformID=0)
|
||||
- Parse segCount, endCode[], startCode[], idDelta[], idRangeOffset[]
|
||||
- Binary search for the segment containing the codepoint
|
||||
- Calculate glyph index
|
||||
|
||||
Tests:
|
||||
- `test_cmap_ascii`: 'A' (0x41) → non-zero glyph index
|
||||
- `test_cmap_space`: ' ' (0x20) → valid glyph index
|
||||
- `test_cmap_unmapped`: very high codepoint → 0 (missing glyph)
|
||||
- `test_cmap_multiple_chars`: several chars return distinct indices
|
||||
|
||||
- [ ] **Step 2: Run tests, commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add cmap Format 4 parsing for Unicode glyph mapping"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: TtfParser — glyf (glyph outlines) + hmtx (metrics)
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/ttf_parser.rs`
|
||||
|
||||
- [ ] **Step 1: Implement loca + glyf parsing**
|
||||
|
||||
Add:
|
||||
```rust
|
||||
pub struct OutlinePoint {
|
||||
pub x: f32,
|
||||
pub y: f32,
|
||||
pub on_curve: bool,
|
||||
}
|
||||
|
||||
pub struct GlyphOutline {
|
||||
pub contours: Vec<Vec<OutlinePoint>>,
|
||||
pub x_min: i16,
|
||||
pub y_min: i16,
|
||||
pub x_max: i16,
|
||||
pub y_max: i16,
|
||||
}
|
||||
|
||||
pub struct GlyphMetrics {
|
||||
pub advance_width: u16,
|
||||
pub left_side_bearing: i16,
|
||||
}
|
||||
```
|
||||
|
||||
Methods:
|
||||
- `glyph_offset(glyph_id: u16) -> usize` — loca table lookup
|
||||
- `glyph_outline(glyph_id: u16) -> Option<GlyphOutline>` — glyf table parse
|
||||
- Simple glyph: endPtsOfContours, flags, x/y deltas
|
||||
- Handle on_curve/off_curve: insert implicit on-curve points between consecutive off-curve points
|
||||
- Compound glyph: skip for now (return None)
|
||||
- `glyph_metrics(glyph_id: u16) -> GlyphMetrics` — hmtx table
|
||||
|
||||
Tests:
|
||||
- `test_glyph_outline_has_contours`: 'A' outline has >= 1 contour
|
||||
- `test_glyph_outline_points`: contour points count > 0
|
||||
- `test_glyph_metrics_advance`: 'A' advance > 0
|
||||
- `test_space_glyph_no_contours`: ' ' has 0 contours (or None)
|
||||
|
||||
- [ ] **Step 2: Run tests, commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add glyf outline and hmtx metrics parsing"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Rasterizer
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/rasterizer.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Implement bezier flattening + scanline rasterizer**
|
||||
|
||||
```rust
|
||||
pub struct RasterResult {
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
pub bitmap: Vec<u8>, // R8 alpha
|
||||
pub offset_x: f32,
|
||||
pub offset_y: f32,
|
||||
}
|
||||
|
||||
/// Flatten quadratic bezier to line segments.
|
||||
fn flatten_quadratic(p0: (f32,f32), p1: (f32,f32), p2: (f32,f32), segments: &mut Vec<(f32,f32)>);
|
||||
|
||||
/// Rasterize outline at given scale.
|
||||
pub fn rasterize(outline: &GlyphOutline, scale: f32, units_per_em: u16) -> RasterResult;
|
||||
```
|
||||
|
||||
Rasterize steps:
|
||||
1. Scale outline points: `px = (x - x_min) * scale`, `py = (y_max - y) * scale` (Y flip)
|
||||
2. Walk contours: on_curve→on_curve = line, on_curve→off_curve→on_curve = quadratic bezier
|
||||
3. Flatten all curves into edges (line segments)
|
||||
4. For each scanline y: find all edge intersections, sort by x
|
||||
5. Fill using non-zero winding rule
|
||||
6. Output alpha bitmap
|
||||
|
||||
Tests:
|
||||
- `test_rasterize_produces_bitmap`: rasterize 'A' at 32px → width>0, height>0, bitmap non-empty
|
||||
- `test_rasterize_has_filled_pixels`: bitmap has at least some non-zero pixels
|
||||
- `test_rasterize_space`: space glyph → 0-width or empty bitmap
|
||||
- `test_flatten_quadratic`: known bezier → correct number of segments
|
||||
|
||||
- [ ] **Step 2: Add module to lib.rs, run tests, commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add glyph rasterizer with bezier flattening and scanline fill"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: GlyphCache (on-demand atlas)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/glyph_cache.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Implement GlyphCache**
|
||||
|
||||
```rust
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub const ATLAS_SIZE: u32 = 1024;
|
||||
|
||||
pub struct GlyphInfo {
|
||||
pub uv: [f32; 4], // u0, v0, u1, v1
|
||||
pub width: f32,
|
||||
pub height: f32,
|
||||
pub advance: f32,
|
||||
pub bearing_x: f32,
|
||||
pub bearing_y: f32,
|
||||
}
|
||||
|
||||
pub struct GlyphCache {
|
||||
pub atlas_data: Vec<u8>,
|
||||
pub atlas_width: u32,
|
||||
pub atlas_height: u32,
|
||||
glyphs: HashMap<char, GlyphInfo>,
|
||||
cursor_x: u32,
|
||||
cursor_y: u32,
|
||||
row_height: u32,
|
||||
pub dirty: bool,
|
||||
}
|
||||
```
|
||||
|
||||
Methods:
|
||||
- `new(width, height)` → zeroed atlas_data
|
||||
- `get(ch) -> Option<&GlyphInfo>`
|
||||
- `insert(ch, bitmap, width, height, advance, bearing_x, bearing_y) -> &GlyphInfo`
|
||||
- Copy bitmap into atlas at (cursor_x, cursor_y)
|
||||
- Advance cursor, handle row overflow
|
||||
- Set dirty = true
|
||||
- `clear_dirty()`
|
||||
|
||||
Tests:
|
||||
- `test_insert_and_get`: insert a 10x10 glyph → get returns correct UV
|
||||
- `test_cache_hit`: insert then get same char → same UV
|
||||
- `test_row_wrap`: insert many wide glyphs → cursor_y advances to next row
|
||||
- `test_uv_coordinates`: UV values are in 0.0-1.0 range and match position
|
||||
|
||||
- [ ] **Step 2: Add module, run tests, commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add GlyphCache on-demand atlas manager"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 6: TtfFont (unified interface)
|
||||
|
||||
**Files:**
|
||||
- Create: `crates/voltex_editor/src/ttf_font.rs`
|
||||
- Modify: `crates/voltex_editor/src/lib.rs`
|
||||
|
||||
- [ ] **Step 1: Implement TtfFont**
|
||||
|
||||
```rust
|
||||
pub struct TtfFont {
|
||||
parser: TtfParser,
|
||||
cache: GlyphCache,
|
||||
pub font_size: f32,
|
||||
pub line_height: f32,
|
||||
pub ascender: f32,
|
||||
pub descender: f32,
|
||||
scale: f32,
|
||||
}
|
||||
|
||||
impl TtfFont {
|
||||
pub fn new(data: &[u8], font_size: f32) -> Result<Self, String>;
|
||||
pub fn glyph(&mut self, ch: char) -> &GlyphInfo; // cache lookup or rasterize+insert
|
||||
pub fn text_width(&mut self, text: &str) -> f32;
|
||||
pub fn atlas_data(&self) -> &[u8];
|
||||
pub fn atlas_size(&self) -> (u32, u32);
|
||||
pub fn is_dirty(&self) -> bool;
|
||||
pub fn clear_dirty(&mut self);
|
||||
}
|
||||
```
|
||||
|
||||
The `glyph` method:
|
||||
1. Check cache.get(ch)
|
||||
2. If miss: parser.glyph_index → parser.glyph_outline → rasterize → cache.insert
|
||||
3. Return &GlyphInfo
|
||||
|
||||
Tests:
|
||||
- `test_ttf_font_new`: loads TTF file, font_size = 24
|
||||
- `test_glyph_caches`: call glyph('A') twice → second is cache hit (dirty resets)
|
||||
- `test_text_width`: "Hello" width > 0, "Hello World" width > "Hello" width
|
||||
- `test_korean_glyph`: '가' glyph loads without panic (if font supports it)
|
||||
|
||||
- [ ] **Step 2: Add module, run tests, commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): add TtfFont unified interface with glyph caching"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 7: UI integration + editor_demo
|
||||
|
||||
**Files:**
|
||||
- Modify: `crates/voltex_editor/src/ui_context.rs`
|
||||
- Modify: `crates/voltex_editor/src/renderer.rs`
|
||||
- Modify: `examples/editor_demo/src/main.rs`
|
||||
|
||||
- [ ] **Step 1: Add ttf_font to UiContext + draw_text helper**
|
||||
|
||||
In `ui_context.rs`, add:
|
||||
```rust
|
||||
pub ttf_font: Option<crate::ttf_font::TtfFont>,
|
||||
```
|
||||
|
||||
Add helper method:
|
||||
```rust
|
||||
pub fn draw_text(&mut self, text: &str, x: f32, y: f32, color: [u8; 4]) {
|
||||
if let Some(ref mut ttf) = self.ttf_font {
|
||||
let mut cx = x;
|
||||
for ch in text.chars() {
|
||||
let glyph = ttf.glyph(ch);
|
||||
let gx = cx + glyph.bearing_x;
|
||||
let gy = y + self.font.glyph_height as f32 - glyph.bearing_y; // baseline align
|
||||
let (u0, v0, u1, v1) = (glyph.uv[0], glyph.uv[1], glyph.uv[2], glyph.uv[3]);
|
||||
if glyph.width > 0.0 && glyph.height > 0.0 {
|
||||
self.draw_list.add_rect_uv(gx, gy, glyph.width, glyph.height, u0, v0, u1, v1, color);
|
||||
}
|
||||
cx += glyph.advance;
|
||||
}
|
||||
} else {
|
||||
// Fallback to bitmap font
|
||||
let gw = self.font.glyph_width as f32;
|
||||
let gh = self.font.glyph_height as f32;
|
||||
let mut cx = x;
|
||||
for ch in text.chars() {
|
||||
let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
|
||||
self.draw_list.add_rect_uv(cx, y, gw, gh, u0, v0, u1, v1, color);
|
||||
cx += gw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ttf_text_width(&mut self, text: &str) -> f32 {
|
||||
if let Some(ref mut ttf) = self.ttf_font {
|
||||
ttf.text_width(text)
|
||||
} else {
|
||||
text.chars().count() as f32 * self.font.glyph_width as f32
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update UiRenderer for TTF atlas**
|
||||
|
||||
In `renderer.rs`, add support for a second texture (TTF atlas):
|
||||
- When TtfFont is active, create a TTF atlas texture (R8Unorm, 1024x1024)
|
||||
- Each frame: check dirty → upload atlas data
|
||||
- Create a second bind group for TTF texture
|
||||
- DrawCommands need to know which texture to use
|
||||
|
||||
Simplest approach: add a `ttf_atlas_texture` and `ttf_bind_group` to UiRenderer. When rendering, if TTF is active, use the TTF bind group for all draws (since TTF atlas includes all rendered glyphs).
|
||||
|
||||
Actually, simpler: just replace the font atlas texture with the TTF atlas when TTF is active. The shader is the same (R8 alpha sampling).
|
||||
|
||||
- [ ] **Step 3: Update editor_demo**
|
||||
|
||||
In `resumed`:
|
||||
```rust
|
||||
// Load TTF font
|
||||
let ttf_data = std::fs::read("C:/Windows/Fonts/arial.ttf")
|
||||
.or_else(|_| std::fs::read("C:/Windows/Fonts/malgun.ttf"))
|
||||
.ok();
|
||||
if let Some(data) = ttf_data {
|
||||
if let Ok(font) = TtfFont::new(&data, 16.0) {
|
||||
state.ui.ttf_font = Some(font);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 4: Build and test**
|
||||
|
||||
Run: `cargo build -p editor_demo`
|
||||
Run: `cargo test -p voltex_editor`
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "feat(editor): integrate TTF font rendering into UI system and editor_demo"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 8: Update docs
|
||||
|
||||
**Files:**
|
||||
- Modify: `docs/STATUS.md`
|
||||
- Modify: `docs/DEFERRED.md`
|
||||
|
||||
- [ ] **Step 1: Update STATUS.md**
|
||||
|
||||
Add: `- voltex_editor: TtfParser, Rasterizer, GlyphCache, TtfFont (TTF rendering, Unicode)`
|
||||
Update test count.
|
||||
|
||||
- [ ] **Step 2: Update DEFERRED.md**
|
||||
|
||||
```
|
||||
- ~~**TTF 폰트**~~ ✅ 자체 TTF 파서 + 래스터라이저 + 온디맨드 캐시 완료. 한글/유니코드 지원.
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git commit -m "docs: update STATUS.md and DEFERRED.md with TTF font support"
|
||||
```
|
||||
176
docs/superpowers/specs/2026-03-26-asset-browser-design.md
Normal file
176
docs/superpowers/specs/2026-03-26-asset-browser-design.md
Normal file
@@ -0,0 +1,176 @@
|
||||
# Asset Browser Design
|
||||
|
||||
## Overview
|
||||
|
||||
에디터 도킹 패널에 파일 시스템 탐색기를 추가한다. 지정된 에셋 루트 디렉토리에서 파일/폴더를 플랫 리스트로 표시하고, 폴더 이동/뒤로가기를 지원. 읽기 전용.
|
||||
|
||||
## Scope
|
||||
|
||||
- 에셋 루트 디렉토리 기준 파일/폴더 목록 표시
|
||||
- 폴더 클릭 → 해당 폴더로 이동
|
||||
- 뒤로가기 (부모 디렉토리, root까지만)
|
||||
- 파일 선택 시 이름/크기/확장자 정보 표시
|
||||
- 로딩/프리뷰 없음 (읽기 전용)
|
||||
|
||||
## Data Model
|
||||
|
||||
```rust
|
||||
pub struct AssetBrowser {
|
||||
root: PathBuf,
|
||||
current: PathBuf,
|
||||
entries: Vec<DirEntry>,
|
||||
selected_file: Option<String>, // 선택된 파일 이름
|
||||
}
|
||||
|
||||
struct DirEntry {
|
||||
name: String,
|
||||
is_dir: bool,
|
||||
size: u64,
|
||||
}
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
```rust
|
||||
impl AssetBrowser {
|
||||
/// 에셋 루트 디렉토리로 초기화. entries를 즉시 스캔.
|
||||
pub fn new(root: PathBuf) -> Self;
|
||||
|
||||
/// 현재 디렉토리의 파일/폴더를 다시 스캔.
|
||||
pub fn refresh(&mut self);
|
||||
|
||||
/// 하위 폴더로 이동. root 바깥으로 나가지 못하게 가드.
|
||||
/// 실제 디렉토리가 아니면 무시.
|
||||
pub fn navigate_to(&mut self, dir_name: &str);
|
||||
|
||||
/// 부모 디렉토리로 이동. root에서는 무시.
|
||||
pub fn go_up(&mut self);
|
||||
|
||||
/// 현재 경로 (root 기준 상대 경로 문자열).
|
||||
pub fn relative_path(&self) -> String;
|
||||
|
||||
/// 현재 entries 접근.
|
||||
pub fn entries(&self) -> &[DirEntry];
|
||||
}
|
||||
|
||||
/// 에셋 브라우저 패널 UI 렌더링.
|
||||
pub fn asset_browser_panel(
|
||||
ui: &mut UiContext,
|
||||
browser: &mut AssetBrowser,
|
||||
rect: &Rect,
|
||||
);
|
||||
```
|
||||
|
||||
## refresh 로직
|
||||
|
||||
```rust
|
||||
fn refresh(&mut self) {
|
||||
self.entries.clear();
|
||||
self.selected_file = None;
|
||||
if let Ok(read_dir) = std::fs::read_dir(&self.current) {
|
||||
for entry in read_dir.flatten() {
|
||||
let meta = entry.metadata().ok();
|
||||
let is_dir = meta.as_ref().map_or(false, |m| m.is_dir());
|
||||
let size = meta.as_ref().map_or(0, |m| m.len());
|
||||
let name = entry.file_name().to_string_lossy().to_string();
|
||||
self.entries.push(DirEntry { name, is_dir, size });
|
||||
}
|
||||
}
|
||||
// 정렬: 폴더 먼저, 그 다음 파일. 각각 이름순.
|
||||
self.entries.sort_by(|a, b| {
|
||||
b.is_dir.cmp(&a.is_dir).then(a.name.cmp(&b.name))
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## navigate_to 로직
|
||||
|
||||
```rust
|
||||
fn navigate_to(&mut self, dir_name: &str) {
|
||||
let target = self.current.join(dir_name);
|
||||
// root 바깥 가드: target이 root의 하위여야 함
|
||||
if target.starts_with(&self.root) && target.is_dir() {
|
||||
self.current = target;
|
||||
self.refresh();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## go_up 로직
|
||||
|
||||
```rust
|
||||
fn go_up(&mut self) {
|
||||
if self.current != self.root {
|
||||
if let Some(parent) = self.current.parent() {
|
||||
if parent.starts_with(&self.root) || parent == self.root {
|
||||
self.current = parent.to_path_buf();
|
||||
self.refresh();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## UI 렌더링
|
||||
|
||||
```
|
||||
asset_browser_panel(ui, browser, rect):
|
||||
ui.layout = LayoutState::new(rect.x + 4, rect.y + 4)
|
||||
|
||||
// 현재 경로
|
||||
ui.text(&format!("Path: {}", browser.relative_path()))
|
||||
|
||||
// 뒤로가기 (root가 아닐 때)
|
||||
if current != root:
|
||||
if ui.button("[..]"):
|
||||
browser.go_up()
|
||||
|
||||
// 항목 목록
|
||||
for entry in browser.entries():
|
||||
if entry.is_dir:
|
||||
label = format!("[D] {}", entry.name)
|
||||
else:
|
||||
label = format!(" {}", entry.name)
|
||||
|
||||
// 클릭 가능한 텍스트 행
|
||||
하이라이트 if selected
|
||||
if clicked:
|
||||
if entry.is_dir:
|
||||
browser.navigate_to(&entry.name)
|
||||
else:
|
||||
browser.selected_file = Some(entry.name.clone())
|
||||
|
||||
// 선택된 파일 정보
|
||||
if let Some(file) = &browser.selected_file:
|
||||
ui.text("-- File Info --")
|
||||
ui.text(&format!("Name: {}", file))
|
||||
// entries에서 찾아서 크기 표시
|
||||
if found:
|
||||
ui.text(&format!("Size: {}", format_size(size)))
|
||||
ui.text(&format!("Type: {}", extension))
|
||||
```
|
||||
|
||||
크기 포맷: `format_size(bytes)` → "123 B", "4.5 KB", "1.2 MB"
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_editor/src/asset_browser.rs` — AssetBrowser, DirEntry, asset_browser_panel, format_size
|
||||
- `crates/voltex_editor/src/lib.rs` — 모듈 추가
|
||||
- `examples/editor_demo/src/main.rs` — Console 패널(panel 3)을 AssetBrowser로 교체
|
||||
|
||||
## Testing
|
||||
|
||||
### AssetBrowser 로직 (파일 시스템 사용, GPU 불필요)
|
||||
- `test_new_scans_entries`: 임시 디렉토리에 파일/폴더 생성 → new() → entries 확인
|
||||
- `test_refresh`: 파일 추가 후 refresh → entries 업데이트 확인
|
||||
- `test_navigate_to`: 하위 폴더로 이동 → current 변경, entries 변경 확인
|
||||
- `test_go_up`: 하위에서 go_up → 부모로 이동 확인
|
||||
- `test_go_up_at_root`: root에서 go_up → current 변경 안 됨
|
||||
- `test_root_guard`: root 바깥 경로 시도 → 무시
|
||||
- `test_entries_sorted`: 폴더가 파일보다 먼저, 이름순
|
||||
|
||||
### format_size
|
||||
- `test_format_size`: 0, 500, 1024, 1500000 등
|
||||
|
||||
### UI (draw_list 검증)
|
||||
- `test_panel_draws_commands`: browser + UiContext → draw_list에 커맨드 생성 확인
|
||||
116
docs/superpowers/specs/2026-03-26-auto-exposure-design.md
Normal file
116
docs/superpowers/specs/2026-03-26-auto-exposure-design.md
Normal file
@@ -0,0 +1,116 @@
|
||||
# Auto Exposure Design
|
||||
|
||||
## Overview
|
||||
|
||||
컴퓨트 셰이더로 HDR 타겟의 평균 log-luminance를 계산하고, 시간 적응형 노출 조절을 톤맵에 적용한다.
|
||||
|
||||
## Scope
|
||||
|
||||
- 컴퓨트 셰이더: HDR 텍스처 → log-luminance 평균 (2-pass 다운샘플)
|
||||
- AutoExposure 구조체: 노출 계산 + 시간 보간
|
||||
- tonemap_shader 수정: exposure uniform 적용
|
||||
|
||||
## AutoExposure
|
||||
|
||||
```rust
|
||||
pub struct AutoExposure {
|
||||
reduce_pipeline: wgpu::ComputePipeline,
|
||||
reduce_bind_group_layout: wgpu::BindGroupLayout,
|
||||
luminance_buffer: wgpu::Buffer, // 최종 평균 luminance (f32 1개)
|
||||
staging_buffer: wgpu::Buffer, // GPU→CPU readback
|
||||
pub exposure: f32,
|
||||
pub min_exposure: f32,
|
||||
pub max_exposure: f32,
|
||||
pub adaptation_speed: f32,
|
||||
pub key_value: f32, // 목표 밝기 (0.18 기본)
|
||||
}
|
||||
```
|
||||
|
||||
## 알고리즘
|
||||
|
||||
### Pass 1: Log-Luminance 합산 (Compute)
|
||||
|
||||
HDR 텍스처의 모든 픽셀에 대해:
|
||||
```
|
||||
luminance = 0.2126*R + 0.7152*G + 0.0722*B
|
||||
log_lum = log(max(luminance, 0.0001))
|
||||
```
|
||||
|
||||
워크그룹 내 shared memory로 합산, atomicAdd로 글로벌 버퍼에 누적.
|
||||
|
||||
단순화: **단일 컴퓨트 디스패치**로 전체 텍스처를 16x16 워크그룹으로 처리. 각 워크그룹이 자기 영역의 합을 atomic으로 글로벌 버퍼에 더함.
|
||||
|
||||
### Pass 2: CPU에서 평균 계산 + 노출 결정
|
||||
|
||||
```rust
|
||||
let avg_log_lum = total_log_lum / pixel_count;
|
||||
let avg_lum = exp(avg_log_lum);
|
||||
let target_exposure = key_value / avg_lum;
|
||||
let clamped = target_exposure.clamp(min_exposure, max_exposure);
|
||||
// 시간 적응 (부드러운 전환)
|
||||
exposure = lerp(exposure, clamped, 1.0 - exp(-dt * adaptation_speed));
|
||||
```
|
||||
|
||||
### Tonemap 수정
|
||||
|
||||
기존 tonemap_shader에 exposure 곱하기:
|
||||
```wgsl
|
||||
// 기존: let color = hdr_color;
|
||||
// 변경:
|
||||
let exposed = hdr_color * exposure;
|
||||
// 그 다음 ACES tonemap
|
||||
```
|
||||
|
||||
tonemap uniform에 `exposure: f32` 필드 추가.
|
||||
|
||||
## Compute Shader (auto_exposure.wgsl)
|
||||
|
||||
```wgsl
|
||||
@group(0) @binding(0) var hdr_texture: texture_2d<f32>;
|
||||
@group(0) @binding(1) var<storage, read_write> result: array<atomic<u32>>;
|
||||
// result[0] = sum of log_lum * 1000 (two's-complement fixed point; reinterpret as i32 on the CPU — negative sums rely on wrapping u32 adds)
|
||||
// result[1] = pixel count
|
||||
|
||||
@compute @workgroup_size(16, 16)
|
||||
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
|
||||
let dims = textureDimensions(hdr_texture);
|
||||
if (gid.x >= dims.x || gid.y >= dims.y) { return; }
|
||||
|
||||
let color = textureLoad(hdr_texture, vec2<i32>(gid.xy), 0);
|
||||
let lum = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
|
||||
let log_lum = log(max(lum, 0.0001));
|
||||
|
||||
// Fixed-point: multiply by 1000 and cast to i32, then atomic add
|
||||
let fixed = i32(log_lum * 1000.0);
|
||||
    atomicAdd(&result[0], bitcast<u32>(fixed));
|
||||
atomicAdd(&result[1], 1u);
|
||||
}
|
||||
```
|
||||
|
||||
CPU에서 readback 후:
|
||||
```rust
|
||||
let sum_fixed = i32::from_le_bytes(data[0..4].try_into().unwrap());
|
||||
let count = u32::from_le_bytes(data[4..8].try_into().unwrap());
|
||||
let avg_log_lum = (sum_fixed as f32 / 1000.0) / count as f32;
|
||||
```
|
||||
|
||||
## GPU→CPU Readback
|
||||
|
||||
- luminance_buffer: STORAGE | COPY_SRC
|
||||
- staging_buffer: MAP_READ | COPY_DST
|
||||
- encoder.copy_buffer_to_buffer → staging
|
||||
- staging.slice(..).map_async(MapMode::Read) → 다음 프레임에 읽기 (1프레임 지연 허용)
|
||||
|
||||
1프레임 지연은 auto exposure에서 자연스러움 (사람 눈도 적응에 시간 걸림).
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_renderer/src/auto_exposure.rs` — AutoExposure 구조체
|
||||
- `crates/voltex_renderer/src/auto_exposure.wgsl` — 컴퓨트 셰이더
|
||||
- `crates/voltex_renderer/src/lib.rs` — 모듈 추가
|
||||
|
||||
## Testing
|
||||
|
||||
- exposure 계산 로직 (순수 수학): avg_lum → target_exposure → clamp → lerp
|
||||
- min/max clamp 검증
|
||||
- adaptation lerp 검증 (dt=0 → 변화 없음, dt=큰값 → 목표에 수렴)
|
||||
68
docs/superpowers/specs/2026-03-26-changed-filter-design.md
Normal file
68
docs/superpowers/specs/2026-03-26-changed-filter-design.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Changed Filter Design
|
||||
|
||||
## Overview
|
||||
|
||||
ECS SparseSet에 tick 기반 변경 감지를 추가하여 `query_changed::<T>()` 로 수정된 컴포넌트만 순회할 수 있게 한다.
|
||||
|
||||
## Scope
|
||||
|
||||
- SparseSet에 변경 tick 추적
|
||||
- get_mut, insert 시 변경 마크
|
||||
- World::query_changed, clear_changed
|
||||
|
||||
## SparseSet 변경
|
||||
|
||||
```rust
|
||||
pub struct SparseSet<T> {
|
||||
sparse: Vec<Option<usize>>,
|
||||
dense_entities: Vec<Entity>,
|
||||
dense_data: Vec<T>,
|
||||
// 새로 추가:
|
||||
ticks: Vec<u64>, // dense_data와 동일 인덱스, 마지막 변경 틱
|
||||
current_tick: u64, // 현재 틱 (clear_changed에서 증가)
|
||||
}
|
||||
```
|
||||
|
||||
### 변경 감지 로직
|
||||
- `insert(entity, value)`: tick 기록 `ticks.push(current_tick)`
|
||||
- `get_mut(entity)`: `ticks[index] = current_tick` (접근 시 변경 마크)
|
||||
- `get(entity)`: tick 변경 없음 (읽기만)
|
||||
- `remove(entity)`: swap-remove 시 ticks도 동일하게 swap
|
||||
- `is_changed(entity)`: `ticks[index] == current_tick`
|
||||
- `increment_tick()`: `current_tick += 1`
|
||||
- `changed_entities()`: ticks == current_tick인 엔티티 목록
|
||||
|
||||
## World 변경
|
||||
|
||||
```rust
|
||||
impl World {
|
||||
/// 현재 틱에 변경된 T 컴포넌트를 가진 엔티티 순회
|
||||
pub fn query_changed<T: 'static>(&self) -> Vec<(Entity, &T)>;
|
||||
|
||||
/// 모든 컴포넌트 스토리지의 tick 증가 (프레임 끝에 호출)
|
||||
pub fn clear_changed(&mut self);
|
||||
}
|
||||
```
|
||||
|
||||
### clear_changed 구현
|
||||
ComponentStorage trait에 `increment_tick()` 메서드 추가. World가 모든 스토리지를 순회하며 호출.
|
||||
|
||||
## ComponentStorage trait 확장
|
||||
|
||||
```rust
|
||||
pub trait ComponentStorage: Any {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
fn remove_entity(&mut self, entity: Entity);
|
||||
fn increment_tick(&mut self); // NEW
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
- `test_insert_is_changed`: insert 후 is_changed → true
|
||||
- `test_get_mut_marks_changed`: get_mut 후 is_changed → true
|
||||
- `test_get_not_changed`: get 후 is_changed → false (이전 틱에 insert한 경우)
|
||||
- `test_clear_changed`: insert → clear_changed → is_changed → false
|
||||
- `test_query_changed`: 3개 엔티티 중 1개만 get_mut → query_changed → 1개만
|
||||
- `test_remove_preserves_ticks`: swap-remove 후 남은 엔티티 tick 정확성
|
||||
208
docs/superpowers/specs/2026-03-26-ecs-integration-design.md
Normal file
208
docs/superpowers/specs/2026-03-26-ecs-integration-design.md
Normal file
@@ -0,0 +1,208 @@
|
||||
# ECS Integration Design (Audio + AI)
|
||||
|
||||
## Overview
|
||||
|
||||
AudioSource와 NavAgent ECS 컴포넌트를 추가하여 오디오와 AI를 엔티티 시스템에 연결한다.
|
||||
|
||||
## Scope
|
||||
|
||||
- AudioSource 컴포넌트 + audio_sync_system
|
||||
- NavAgent 컴포넌트 + nav_agent_system
|
||||
- 각 crate에 voltex_ecs 의존성 추가
|
||||
|
||||
## AudioSource
|
||||
|
||||
`crates/voltex_audio/src/audio_source.rs`
|
||||
|
||||
```rust
|
||||
use voltex_math::Vec3;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AudioSource {
|
||||
pub clip_id: u32,
|
||||
pub volume: f32,
|
||||
pub spatial: bool,
|
||||
pub looping: bool,
|
||||
pub playing: bool,
|
||||
pub played_once: bool, // 이미 재생 시작했는지 (중복 방지)
|
||||
}
|
||||
|
||||
impl AudioSource {
|
||||
pub fn new(clip_id: u32) -> Self {
|
||||
AudioSource {
|
||||
clip_id,
|
||||
volume: 1.0,
|
||||
spatial: false,
|
||||
looping: false,
|
||||
playing: false,
|
||||
played_once: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn spatial(mut self) -> Self { self.spatial = true; self }
|
||||
pub fn looping(mut self) -> Self { self.looping = true; self }
|
||||
pub fn play(&mut self) { self.playing = true; self.played_once = false; }
|
||||
pub fn stop(&mut self) { self.playing = false; }
|
||||
}
|
||||
```
|
||||
|
||||
### audio_sync_system
|
||||
|
||||
```rust
|
||||
use voltex_ecs::world::World;
|
||||
use voltex_ecs::transform::Transform;
|
||||
|
||||
pub fn audio_sync_system(world: &mut World, audio: &mut AudioSystem) {
|
||||
for (entity, transform, source) in world.query2::<Transform, AudioSource>() {
|
||||
if source.playing && !source.played_once {
|
||||
if source.spatial {
|
||||
audio.play_3d(source.clip_id, transform.position, source.volume);
|
||||
} else {
|
||||
audio.play(source.clip_id, source.volume);
|
||||
}
|
||||
// Mark as started (don't re-trigger every frame)
|
||||
// Need mutable access — use get_mut after query
|
||||
}
|
||||
}
|
||||
// Separate pass for mutation (borrow rules)
|
||||
let entities: Vec<_> = world.query::<AudioSource>()
|
||||
.filter(|(_, s)| s.playing && !s.played_once)
|
||||
.map(|(e, _)| e)
|
||||
.collect();
|
||||
for entity in entities {
|
||||
if let Some(source) = world.get_mut::<AudioSource>(entity) {
|
||||
source.played_once = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## NavAgent
|
||||
|
||||
`crates/voltex_ai/src/nav_agent.rs`
|
||||
|
||||
```rust
|
||||
use voltex_math::Vec3;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NavAgent {
|
||||
pub target: Option<Vec3>,
|
||||
pub speed: f32,
|
||||
pub path: Vec<Vec3>,
|
||||
pub current_waypoint: usize,
|
||||
pub reached: bool,
|
||||
}
|
||||
|
||||
impl NavAgent {
|
||||
pub fn new(speed: f32) -> Self {
|
||||
NavAgent {
|
||||
target: None,
|
||||
speed,
|
||||
path: Vec::new(),
|
||||
current_waypoint: 0,
|
||||
reached: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_target(&mut self, target: Vec3) {
|
||||
self.target = Some(target);
|
||||
self.path.clear();
|
||||
self.current_waypoint = 0;
|
||||
self.reached = false;
|
||||
}
|
||||
|
||||
pub fn clear_target(&mut self) {
|
||||
self.target = None;
|
||||
self.path.clear();
|
||||
self.current_waypoint = 0;
|
||||
self.reached = false;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### nav_agent_system
|
||||
|
||||
```rust
|
||||
use voltex_ecs::world::World;
|
||||
use voltex_ecs::transform::Transform;
|
||||
|
||||
pub fn nav_agent_system(world: &mut World, navmesh: &NavMesh, dt: f32) {
|
||||
// Collect entities that need path finding
|
||||
let needs_path: Vec<_> = world.query2::<Transform, NavAgent>()
|
||||
.filter(|(_, _, agent)| agent.target.is_some() && agent.path.is_empty() && !agent.reached)
|
||||
.map(|(e, t, a)| (e, t.position, a.target.unwrap()))
|
||||
.collect();
|
||||
|
||||
// Find paths
|
||||
for (entity, start, goal) in needs_path {
|
||||
if let Some(path) = navmesh.find_path(start, goal) {
|
||||
if let Some(agent) = world.get_mut::<NavAgent>(entity) {
|
||||
agent.path = path;
|
||||
agent.current_waypoint = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move agents along paths
|
||||
let moving: Vec<_> = world.query2::<Transform, NavAgent>()
|
||||
.filter(|(_, _, a)| !a.path.is_empty() && !a.reached)
|
||||
.map(|(e, t, a)| (e, t.position, a.speed, a.path[a.current_waypoint], a.current_waypoint, a.path.len()))
|
||||
.collect();
|
||||
|
||||
for (entity, pos, speed, waypoint, wp_idx, path_len) in moving {
|
||||
let dir = waypoint - pos;
|
||||
let dist = dir.length();
|
||||
let arrival_dist = 0.3;
|
||||
|
||||
if dist < arrival_dist {
|
||||
// Reached waypoint
|
||||
if let Some(agent) = world.get_mut::<NavAgent>(entity) {
|
||||
agent.current_waypoint += 1;
|
||||
if agent.current_waypoint >= path_len {
|
||||
agent.reached = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Move toward waypoint
|
||||
let move_dir = dir.normalize();
|
||||
let move_dist = (speed * dt).min(dist);
|
||||
if let Some(transform) = world.get_mut::<Transform>(entity) {
|
||||
transform.position = pos + move_dir * move_dist;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
`voltex_audio/Cargo.toml` 추가:
|
||||
```toml
|
||||
voltex_ecs.workspace = true
|
||||
voltex_math.workspace = true # (이미 있을 수 있음)
|
||||
```
|
||||
|
||||
`voltex_ai/Cargo.toml` 추가:
|
||||
```toml
|
||||
voltex_ecs.workspace = true
|
||||
voltex_math.workspace = true # (이미 있을 수 있음)
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
### AudioSource
|
||||
- new(): 기본값 검증
|
||||
- play/stop: playing 상태 변경
|
||||
- spatial/looping 빌더 패턴
|
||||
|
||||
### NavAgent
|
||||
- new(): 기본값 검증
|
||||
- set_target: target 설정, path 초기화
|
||||
- clear_target: 모든 상태 초기화
|
||||
|
||||
### audio_sync_system (ECS 통합)
|
||||
- World에 Transform + AudioSource 엔티티 → system 호출 → played_once 변경 확인
|
||||
|
||||
### nav_agent_system (ECS 통합)
|
||||
- World에 Transform + NavAgent 엔티티 → target 설정 → system 호출 → position 변경 확인
|
||||
- 간단한 NavMesh 필요 (직선 경로)
|
||||
190
docs/superpowers/specs/2026-03-26-editor-docking-design.md
Normal file
190
docs/superpowers/specs/2026-03-26-editor-docking-design.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# Editor Docking System Design
|
||||
|
||||
## Overview
|
||||
|
||||
voltex_editor에 심플 도킹 시스템을 추가한다. 이진 분할 트리 기반으로 패널을 배치하고, 경계선 드래그로 리사이즈하며, 탭으로 같은 영역에 여러 패널을 전환할 수 있다.
|
||||
|
||||
## Scope
|
||||
|
||||
- 이진 분할 트리 (Split/Leaf 노드)
|
||||
- 경계선 드래그 리사이즈
|
||||
- 탭 표시 + 클릭 전환 (드래그 이동 없음)
|
||||
- 런타임 상태 유지 (파일 저장 없음)
|
||||
- 플로팅 윈도우 없음
|
||||
|
||||
## Data Model
|
||||
|
||||
```rust
|
||||
pub enum Axis {
|
||||
Horizontal, // 좌우 분할
|
||||
Vertical, // 상하 분할
|
||||
}
|
||||
|
||||
pub enum DockNode {
|
||||
Leaf {
|
||||
tabs: Vec<u32>, // PanelId 목록
|
||||
active: usize, // 현재 활성 탭 인덱스
|
||||
},
|
||||
Split {
|
||||
axis: Axis,
|
||||
ratio: f32, // 0.1..=0.9, 첫 번째 자식 비율
|
||||
children: [Box<DockNode>; 2],
|
||||
},
|
||||
}
|
||||
|
||||
pub struct DockTree {
|
||||
root: DockNode,
|
||||
names: Vec<&'static str>, // panel_id → 표시 이름 (인덱스 = panel_id)
|
||||
cached_layouts: Vec<LeafLayout>, // layout() 결과 캐시 (draw_chrome에서 사용)
|
||||
resizing: Option<ResizeState>, // 현재 리사이즈 중인 Split
|
||||
prev_mouse_down: bool, // 이전 프레임 마우스 상태 (클릭 감지용)
|
||||
}
|
||||
|
||||
struct ResizeState {
|
||||
node_path: Vec<usize>, // 루트에서 해당 Split까지의 child 인덱스 경로
|
||||
axis: Axis,
|
||||
origin: f32, // 분할 영역의 시작 좌표
|
||||
size: f32, // 분할 영역의 전체 크기
|
||||
}
|
||||
|
||||
pub struct LeafLayout {
|
||||
pub tabs: Vec<u32>,
|
||||
pub names: Vec<&'static str>,
|
||||
pub active: usize,
|
||||
pub tab_bar_rect: Rect,
|
||||
pub content_rect: Rect,
|
||||
}
|
||||
```
|
||||
|
||||
## Rect
|
||||
|
||||
레이아웃 계산에 사용할 사각형 구조체:
|
||||
|
||||
```rust
|
||||
pub struct Rect {
|
||||
pub x: f32,
|
||||
pub y: f32,
|
||||
pub w: f32,
|
||||
pub h: f32,
|
||||
}
|
||||
```
|
||||
|
||||
## Layout Algorithm
|
||||
|
||||
`DockTree::layout(screen_rect)` — 트리를 재귀 순회하며 각 Leaf에 영역을 배정한다.
|
||||
|
||||
- **Split 노드**: axis와 ratio에 따라 사각형을 둘로 분할
|
||||
- Horizontal: 좌(w * ratio) / 우(w * (1 - ratio))
|
||||
- Vertical: 상(h * ratio) / 하(h * (1 - ratio))
|
||||
- **Leaf 노드**: 탭 바 높이(20px)를 빼고 나머지가 컨텐츠 영역
|
||||
- 결과를 `cached_layouts`에 저장 (`draw_chrome`에서 사용)
|
||||
- 반환: `Vec<(u32, Rect)>` — (활성 탭의 panel_id, 컨텐츠 rect)
|
||||
|
||||
## Resize
|
||||
|
||||
Split 경계선 ±3px 영역에서 마우스 드래그를 감지한다.
|
||||
|
||||
- `update(mouse_x, mouse_y, mouse_down)` 매 프레임 호출
|
||||
- 클릭 감지: `mouse_down && !prev_mouse_down`으로 just-clicked 판별 (prev_mouse_down 내부 추적)
|
||||
- 마우스 다운 시 경계선 위에 있으면 해당 Split 노드를 "active resize" 상태로 설정
|
||||
- 드래그 중: 마우스 위치에 따라 ratio 업데이트
|
||||
- 마우스 릴리즈 시 해제
|
||||
- ratio는 0.1~0.9로 클램프 (패널이 너무 작아지지 않도록)
|
||||
|
||||
경계선 탐색: 트리를 재귀 순회하며 Split 노드마다 경계선 위치를 계산하고 마우스 위치와 비교한다.
|
||||
|
||||
## Tab Bar
|
||||
|
||||
각 Leaf 상단 20px에 탭 바를 렌더링한다.
|
||||
|
||||
- 탭 너비: 텍스트 길이 * 8px(글리프 폭) + 좌우 패딩 8px
|
||||
- 클릭 시 active 인덱스 변경
|
||||
- 활성 탭: 밝은 배경 (60, 60, 60)
|
||||
- 비활성 탭: 어두운 배경 (40, 40, 40)
|
||||
- 탭이 1개뿐이면 탭 바는 렌더하되 클릭 처리만 스킵
|
||||
|
||||
## Content Area
|
||||
|
||||
탭 바 아래 영역에 scissor rect를 적용하고 패널 콜백을 호출한다.
|
||||
|
||||
- 기존 `begin_panel`/`end_panel` 대신 rect를 직접 전달
|
||||
- 패널 내부에서는 기존 위젯(text, button, slider 등)을 그대로 사용
|
||||
|
||||
## Visual Feedback
|
||||
|
||||
- Split 경계선: 1px 어두운 라인 (30, 30, 30)
|
||||
- 리사이즈 중 경계선: 밝은 라인 (100, 100, 200)으로 하이라이트
|
||||
- 탭 바 하단: 1px 구분선
|
||||
|
||||
## Invariants
|
||||
|
||||
- `tabs`는 반드시 1개 이상의 요소를 가져야 한다
|
||||
- `active`는 항상 `tabs.len() - 1` 이하로 클램프한다 (out of bounds 방지)
|
||||
- 리사이즈 핸들은 탭 바보다 우선한다 (겹치는 영역에서 리사이즈 우선)
|
||||
|
||||
## API
|
||||
|
||||
```rust
|
||||
impl DockTree {
|
||||
/// 트리 생성. names: panel_id → 표시 이름 매핑 (인덱스 = panel_id)
|
||||
pub fn new(root: DockNode, names: Vec<&'static str>) -> Self;
|
||||
|
||||
/// 레이아웃 계산 — 결과를 내부에 캐시하고, 활성 패널의 (panel_id, content_rect) 반환
|
||||
pub fn layout(&mut self, rect: Rect) -> Vec<(u32, Rect)>;
|
||||
|
||||
/// 리사이즈 + 탭 클릭 처리 (내부에서 prev_mouse_down 추적)
|
||||
pub fn update(&mut self, mouse_x: f32, mouse_y: f32, mouse_down: bool);
|
||||
|
||||
/// 탭 바 + 경계선 렌더링 (cached_layouts 사용)
|
||||
pub fn draw_chrome(&self, ui: &mut UiContext);
|
||||
}
|
||||
```
|
||||
|
||||
## Usage Example
|
||||
|
||||
```rust
|
||||
let names = vec!["Hierarchy", "Viewport", "Inspector", "Console"];
|
||||
let mut dock = DockTree::new(DockNode::Split {
|
||||
axis: Axis::Horizontal,
|
||||
ratio: 0.25,
|
||||
children: [
|
||||
Box::new(DockNode::Leaf { tabs: vec![0], active: 0 }),
|
||||
Box::new(DockNode::Split {
|
||||
axis: Axis::Vertical,
|
||||
ratio: 0.7,
|
||||
children: [
|
||||
Box::new(DockNode::Leaf { tabs: vec![1], active: 0 }),
|
||||
Box::new(DockNode::Leaf { tabs: vec![2, 3], active: 0 }),
|
||||
],
|
||||
}),
|
||||
],
|
||||
}, names);
|
||||
|
||||
// frame loop
|
||||
let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 1280.0, h: 720.0 });
|
||||
dock.update(mouse_x, mouse_y, mouse_down);
|
||||
dock.draw_chrome(&mut ui);
|
||||
|
||||
for (panel_id, rect) in &areas {
|
||||
    match *panel_id {
|
||||
0 => draw_hierarchy(ui, rect),
|
||||
1 => draw_viewport(ui, rect),
|
||||
2 => draw_inspector(ui, rect),
|
||||
3 => draw_console(ui, rect),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
- 새 파일: `crates/voltex_editor/src/dock.rs`
|
||||
- `lib.rs`에 `mod dock; pub use dock::*;` 추가
|
||||
- 기존 파일 수정 없음
|
||||
|
||||
## Testing
|
||||
|
||||
- 레이아웃 계산: 알려진 rect에 대해 분할 결과 검증
|
||||
- 리사이즈: ratio 변경, 클램프 경계값
|
||||
- 탭: active 인덱스 전환
|
||||
- 중첩 분할: 3단계 이상 중첩된 트리 레이아웃 정확성
|
||||
138
docs/superpowers/specs/2026-03-26-entity-inspector-design.md
Normal file
138
docs/superpowers/specs/2026-03-26-entity-inspector-design.md
Normal file
@@ -0,0 +1,138 @@
|
||||
# Entity Inspector Design
|
||||
|
||||
## Overview
|
||||
|
||||
에디터 도킹 패널에 엔티티 계층 트리(Hierarchy)와 선택된 엔티티의 컴포넌트 편집(Inspector)을 추가한다. Transform, Tag, Parent 컴포넌트를 하드코딩으로 지원.
|
||||
|
||||
## Scope
|
||||
|
||||
- Hierarchy 패널: Transform 보유 엔티티를 계층 트리로 표시, 클릭 선택
|
||||
- Inspector 패널: 선택된 엔티티의 Transform(슬라이더), Tag(text_input), Parent(읽기 전용) 편집
|
||||
- 고정 컴포넌트만 (리플렉션/등록 시스템 없음)
|
||||
|
||||
## Dependencies
|
||||
|
||||
`voltex_editor/Cargo.toml`에 추가:
|
||||
```toml
|
||||
voltex_ecs.workspace = true
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
함수 기반, 구조체 불필요:
|
||||
|
||||
```rust
|
||||
/// Hierarchy 패널: 엔티티 트리 표시, 선택 처리
|
||||
pub fn hierarchy_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &World,
|
||||
selected: &mut Option<Entity>,
|
||||
rect: &Rect,
|
||||
);
|
||||
|
||||
/// Inspector 패널: 선택된 엔티티의 컴포넌트 편집
|
||||
pub fn inspector_panel(
|
||||
ui: &mut UiContext,
|
||||
world: &mut World,
|
||||
selected: Option<Entity>,
|
||||
rect: &Rect,
|
||||
);
|
||||
```
|
||||
|
||||
## Hierarchy 패널
|
||||
|
||||
### 엔티티 수집
|
||||
- `roots(world)` → 루트 엔티티 (Transform 있고 Parent 없는) 목록
|
||||
- 각 루트에서 `world.get::<Children>(entity)` 로 자식 재귀 탐색
|
||||
|
||||
### 렌더링
|
||||
- 재귀 함수: `draw_entity_node(ui, world, entity, depth, selected)`
|
||||
- 들여쓰기: `rect.x + 4.0 + depth * 16.0`
|
||||
- 표시 텍스트: Tag가 있으면 `tag.0`, 없으면 `"Entity(id)"`
|
||||
- 자식이 있으면 접두사 `"> "`, 없으면 `" "`
|
||||
- 선택된 엔티티: 배경 하이라이트 (밝은 색)
|
||||
- 클릭 감지: `ui.mouse_clicked && ui.mouse_in_rect(x, y, w, line_height)`
|
||||
|
||||
### 스크롤
|
||||
- `ui.begin_scroll_panel` / `end_scroll_panel` 사용
|
||||
- content_height = 전체 노드 수 * line_height (재귀 `count_nodes` 헬퍼로 사전 계산)
|
||||
- 선택 하이라이트 색상: `[0x44, 0x66, 0x88, 0xFF]`
|
||||
|
||||
## Inspector 패널
|
||||
|
||||
선택된 엔티티가 None이면 "No entity selected" 표시.
|
||||
|
||||
### Transform 섹션
|
||||
- `world.has_component::<Transform>(entity)` 체크
|
||||
- 헤더: "-- Transform --"
|
||||
- Position X/Y/Z: 슬라이더 3개 (range -50.0 ~ 50.0)
|
||||
- Rotation X/Y/Z: 슬라이더 3개 (range -3.15 ~ 3.15, 약 ±π)
|
||||
- Scale X/Y/Z: 슬라이더 3개 (range 0.01 ~ 10.0)
|
||||
|
||||
**Borrow 패턴** (Transform은 Copy이므로 cheap):
|
||||
```rust
|
||||
// 1. 값 복사 (immutable borrow 해제)
|
||||
let (px, py, pz, rx, ry, rz, sx, sy, sz) = {
|
||||
let t = world.get::<Transform>(entity).unwrap();
|
||||
(t.position.x, t.position.y, t.position.z,
|
||||
t.rotation.x, t.rotation.y, t.rotation.z,
|
||||
t.scale.x, t.scale.y, t.scale.z)
|
||||
};
|
||||
// 2. 슬라이더 (world 미사용)
|
||||
let new_px = ui.slider("Pos X", px, -50.0, 50.0);
|
||||
// ... 9개 슬라이더
|
||||
// 3. 변경된 값 쓰기 (mutable borrow)
|
||||
if let Some(t) = world.get_mut::<Transform>(entity) {
|
||||
t.position.x = new_px; // ...
|
||||
}
|
||||
```
|
||||
|
||||
### Tag 섹션
|
||||
- `world.has_component::<Tag>(entity)` 체크
|
||||
- 헤더: "-- Tag --"
|
||||
- **호출자가 `tag_buffer: String`을 별도로 유지** (선택 변경 시 동기화)
|
||||
- `ui.text_input(id, &mut tag_buffer, x, y, width)` 로 편집
|
||||
- 변경 시 `world.get_mut::<Tag>(entity).unwrap().0 = tag_buffer.clone()`
|
||||
|
||||
### Parent 섹션
|
||||
- `world.has_component::<Parent>(entity)` 체크
|
||||
- 헤더: "-- Parent --"
|
||||
- `"Parent: Entity(id)"` 텍스트 (읽기 전용)
|
||||
|
||||
## UI 통합
|
||||
|
||||
```rust
|
||||
// editor_demo에서 dock 패널 매핑
|
||||
// panel 0: Hierarchy → hierarchy_panel(ui, &world, &mut selected, rect)
|
||||
// panel 1: Viewport → 3D scene
|
||||
// panel 2: Inspector → inspector_panel(ui, &mut world, selected, rect)
|
||||
// panel 3: Console → text
|
||||
```
|
||||
|
||||
editor_demo에서 ECS World를 생성하고 데모 엔티티를 배치:
|
||||
- Ground (Transform + Tag("Ground"))
|
||||
- Cube1 (Transform + Tag("Cube1"))
|
||||
- Cube2 (Transform + Tag("Cube2"))
|
||||
- Cube3 (Transform + Tag("Cube3"))
|
||||
- Parent-child 관계 1개 (Cube2를 Cube1의 자식으로)
|
||||
|
||||
Transform 수정 후 `propagate_transforms(world)` 호출하여 WorldTransform 갱신.
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_editor/src/inspector.rs` — hierarchy_panel, inspector_panel 함수
|
||||
- `crates/voltex_editor/src/lib.rs` — 모듈 추가
|
||||
- `crates/voltex_editor/Cargo.toml` — voltex_ecs 의존성
|
||||
- `examples/editor_demo/src/main.rs` — World + 엔티티 + 패널 통합
|
||||
|
||||
## Testing
|
||||
|
||||
### hierarchy_panel (GPU 불필요)
|
||||
- World에 엔티티 3개 (Transform) 추가 → hierarchy_panel 호출 → draw_list에 커맨드 생성 확인
|
||||
- 선택 처리: 마우스 클릭 시뮬레이션 → selected 변경 확인
|
||||
- 빈 World → 커맨드 최소 (헤더만)
|
||||
|
||||
### inspector_panel (GPU 불필요)
|
||||
- Transform을 가진 엔티티 선택 → inspector_panel 호출 → draw_list에 슬라이더 커맨드 확인
|
||||
- None 선택 → "No entity selected" 텍스트만
|
||||
- Transform 슬라이더 조작 → world에서 값 변경 확인 (슬라이더 반환값으로 시뮬레이션)
|
||||
@@ -0,0 +1,137 @@
|
||||
# glTF Animation/Skin Parser Extension Design
|
||||
|
||||
## Overview
|
||||
|
||||
기존 gltf.rs 파서를 확장하여 animations, skins, nodes 데이터를 파싱한다. 렌더링/애니메이션 시스템은 별도 구현.
|
||||
|
||||
## Scope
|
||||
|
||||
- nodes 배열 파싱 (스켈레톤 계층, transform)
|
||||
- skins 배열 파싱 (joints, inverse bind matrices)
|
||||
- animations 배열 파싱 (channels, samplers, keyframes)
|
||||
- JOINTS_0/WEIGHTS_0 버텍스 어트리뷰트 추출
|
||||
- 기존 메시/머티리얼 파싱에 영향 없음
|
||||
|
||||
## Data Structures
|
||||
|
||||
### GltfNode
|
||||
```rust
|
||||
pub struct GltfNode {
|
||||
pub name: Option<String>,
|
||||
pub children: Vec<usize>,
|
||||
pub translation: [f32; 3], // 기본 [0,0,0]
|
||||
pub rotation: [f32; 4], // 쿼터니언 [x,y,z,w], 기본 [0,0,0,1]
|
||||
pub scale: [f32; 3], // 기본 [1,1,1]
|
||||
pub mesh: Option<usize>,
|
||||
pub skin: Option<usize>,
|
||||
}
|
||||
```
|
||||
|
||||
### GltfSkin
|
||||
```rust
|
||||
pub struct GltfSkin {
|
||||
pub name: Option<String>,
|
||||
pub joints: Vec<usize>,
|
||||
pub inverse_bind_matrices: Vec<[[f32; 4]; 4]>,
|
||||
pub skeleton: Option<usize>,
|
||||
}
|
||||
```
|
||||
|
||||
### GltfAnimation
|
||||
```rust
|
||||
pub struct GltfAnimation {
|
||||
pub name: Option<String>,
|
||||
pub channels: Vec<GltfChannel>,
|
||||
}
|
||||
|
||||
pub struct GltfChannel {
|
||||
pub target_node: usize,
|
||||
pub target_path: AnimationPath,
|
||||
pub interpolation: Interpolation,
|
||||
pub times: Vec<f32>,
|
||||
pub values: Vec<f32>, // flat: vec3이면 len = times.len()*3, quat이면 *4
|
||||
}
|
||||
|
||||
pub enum AnimationPath {
|
||||
Translation,
|
||||
Rotation,
|
||||
Scale,
|
||||
}
|
||||
|
||||
pub enum Interpolation {
|
||||
Linear,
|
||||
Step,
|
||||
CubicSpline,
|
||||
}
|
||||
```
|
||||
|
||||
### GltfData 확장
|
||||
```rust
|
||||
pub struct GltfData {
|
||||
pub meshes: Vec<GltfMesh>,
|
||||
pub nodes: Vec<GltfNode>,
|
||||
pub skins: Vec<GltfSkin>,
|
||||
pub animations: Vec<GltfAnimation>,
|
||||
}
|
||||
```
|
||||
|
||||
### GltfMesh 확장
|
||||
```rust
|
||||
pub struct GltfMesh {
|
||||
pub vertices: Vec<MeshVertex>,
|
||||
pub indices: Vec<u32>,
|
||||
pub name: Option<String>,
|
||||
pub material: Option<GltfMaterial>,
|
||||
pub joints: Option<Vec<[u16; 4]>>, // JOINTS_0 (스킨드 메시만)
|
||||
pub weights: Option<Vec<[f32; 4]>>, // WEIGHTS_0 (스킨드 메시만)
|
||||
}
|
||||
```
|
||||
|
||||
## Parsing Logic
|
||||
|
||||
### nodes
|
||||
- `"nodes"` JSON 배열 순회
|
||||
- 각 노드에서 translation(vec3), rotation(vec4 quat), scale(vec3) 추출 (없으면 기본값)
|
||||
- children(배열), mesh(정수), skin(정수) 추출
|
||||
|
||||
### skins
|
||||
- `"skins"` JSON 배열 순회
|
||||
- joints: 정수 배열 → 노드 인덱스
|
||||
- inverseBindMatrices: accessor 인덱스 → mat4 배열 추출 (16 floats per joint)
|
||||
- skeleton: 정수 (루트 노드)
|
||||
|
||||
### animations
|
||||
- `"animations"` 배열의 각 애니메이션:
|
||||
- samplers 배열 파싱: input(시간 accessor), output(값 accessor), interpolation(문자열)
|
||||
- channels 배열 파싱: sampler 인덱스, target.node, target.path(문자열)
|
||||
- 각 채널을 GltfChannel로 조합: sampler의 input→times, output→values, interpolation
|
||||
|
||||
### Accessor 추출 확장
|
||||
- 기존 `extract_accessor_f32` 재사용 (times, values, inverse bind matrices)
|
||||
- 새로 `extract_accessor_u16`/`extract_accessor_u8` 추가 (JOINTS_0용)
|
||||
- WEIGHTS_0: `extract_accessor_f32` 재사용
|
||||
|
||||
### JOINTS_0/WEIGHTS_0
|
||||
- 메시 프리미티브의 attributes에서 "JOINTS_0", "WEIGHTS_0" 키 확인
|
||||
- JOINTS_0: 보통 UNSIGNED_BYTE(5121) 또는 UNSIGNED_SHORT(5123) 타입
|
||||
- WEIGHTS_0: FLOAT(5126) 타입
|
||||
- 결과를 GltfMesh의 joints/weights 필드에 저장
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_renderer/src/gltf.rs` — 기존 파일 확장
|
||||
|
||||
## Testing
|
||||
|
||||
실제 스킨드 GLB 파일이 필요하므로 테스트 전략:
|
||||
|
||||
### 단위 테스트 (바이트 구성 어려움 → 로직 위주)
|
||||
- AnimationPath/Interpolation 문자열 파싱 테스트
|
||||
- 기존 파싱이 깨지지 않는 회귀 테스트 (기존 테스트 GLB 그대로 통과)
|
||||
|
||||
### 통합 테스트 (실제 GLB)
|
||||
- glTF 샘플 파일 사용 (assets/test/ 에 작은 스킨드 GLB 포함)
|
||||
- 또는 테스트에서 간단한 GLB를 프로그래밍적으로 구성 (기존 테스트 패턴 따라)
|
||||
- nodes 수, skins 수, animations 수, channels 수 검증
|
||||
- inverse bind matrices 배열 크기 = joints 수
|
||||
- times/values 배열이 비어있지 않은지 확인
|
||||
98
docs/superpowers/specs/2026-03-26-instancing-design.md
Normal file
98
docs/superpowers/specs/2026-03-26-instancing-design.md
Normal file
@@ -0,0 +1,98 @@
|
||||
# GPU Instancing Design
|
||||
|
||||
## Overview
|
||||
|
||||
동일 메시를 수천 개 렌더링할 때 단일 draw call로 처리하는 인스턴싱 시스템. per-entity UBO 대신 인스턴스 버퍼에 모델 행렬을 패킹.
|
||||
|
||||
## Scope
|
||||
|
||||
- InstanceData 구조체 (모델 행렬 + 추가 per-instance 데이터)
|
||||
- InstanceBuffer 관리 (동적 크기, 매 프레임 업로드)
|
||||
- 인스턴스드 렌더 파이프라인 (기존 mesh_shader 확장)
|
||||
- 기존 파이프라인과 병행 (인스턴싱은 옵션)
|
||||
|
||||
## InstanceData
|
||||
|
||||
```rust
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Pod, Zeroable)]
|
||||
pub struct InstanceData {
|
||||
pub model: [[f32; 4]; 4], // 모델 행렬 (64 bytes)
|
||||
pub color: [f32; 4], // per-instance 색상/틴트 (16 bytes)
|
||||
}
|
||||
// Total: 80 bytes per instance
|
||||
```
|
||||
|
||||
셰이더에서 vertex attribute로 전달 (location 4~8).
|
||||
|
||||
## InstanceBuffer
|
||||
|
||||
```rust
|
||||
pub struct InstanceBuffer {
|
||||
buffer: wgpu::Buffer,
|
||||
capacity: usize, // 현재 버퍼 용량 (인스턴스 수)
|
||||
pub count: usize, // 실제 인스턴스 수
|
||||
}
|
||||
```
|
||||
|
||||
- `new(device, initial_capacity)` — VERTEX | COPY_DST 버퍼
|
||||
- `update(device, queue, instances: &[InstanceData])` — 용량 초과 시 재생성, write_buffer
|
||||
- 인스턴스 데이터는 CPU에서 매 프레임 빌드 (Transform 순회)
|
||||
|
||||
## Instanced Shader (instanced_shader.wgsl)
|
||||
|
||||
기존 mesh_shader.wgsl 기반 + 인스턴스 입력:
|
||||
|
||||
```wgsl
|
||||
struct InstanceInput {
|
||||
@location(4) model_0: vec4<f32>,
|
||||
@location(5) model_1: vec4<f32>,
|
||||
@location(6) model_2: vec4<f32>,
|
||||
@location(7) model_3: vec4<f32>,
|
||||
@location(8) color: vec4<f32>,
|
||||
};
|
||||
|
||||
@vertex
|
||||
fn vs_main(vertex: VertexInput, instance: InstanceInput) -> VertexOutput {
|
||||
let model = mat4x4<f32>(instance.model_0, instance.model_1, instance.model_2, instance.model_3);
|
||||
let world_pos = model * vec4<f32>(vertex.position, 1.0);
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
## Pipeline
|
||||
|
||||
기존 create_mesh_pipeline에 instance attributes 추가:
|
||||
- slot 0: MeshVertex (48 bytes, per-vertex)
|
||||
- slot 1: InstanceData (80 bytes, per-instance, step_mode: Instance)
|
||||
|
||||
```rust
|
||||
pub fn create_instanced_pipeline(
|
||||
device: &wgpu::Device,
|
||||
format: wgpu::TextureFormat,
|
||||
light_layout: &wgpu::BindGroupLayout,
|
||||
texture_layout: &wgpu::BindGroupLayout,
|
||||
) -> wgpu::RenderPipeline;
|
||||
```
|
||||
|
||||
## Render
|
||||
|
||||
```rust
|
||||
rpass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
|
||||
rpass.set_vertex_buffer(1, instance_buffer.buffer.slice(..));
|
||||
rpass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
|
||||
rpass.draw_indexed(0..mesh.num_indices, 0, 0..instance_buffer.count as u32);
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_renderer/src/instancing.rs` — InstanceData, InstanceBuffer, pipeline, InstanceData::LAYOUT
|
||||
- `crates/voltex_renderer/src/instanced_shader.wgsl` — 인스턴스드 셰이더
|
||||
- `crates/voltex_renderer/src/lib.rs` — 모듈 추가
|
||||
|
||||
## Testing
|
||||
|
||||
- InstanceData 크기: size_of == 80
|
||||
- InstanceData::LAYOUT: attribute 수, stride 검증
|
||||
- InstanceBuffer: capacity 확장 로직 (GPU 없이 로직만)
|
||||
- sort by mesh for batching (같은 메시끼리 모아서 단일 draw call)
|
||||
219
docs/superpowers/specs/2026-03-26-scene-viewport-design.md
Normal file
219
docs/superpowers/specs/2026-03-26-scene-viewport-design.md
Normal file
@@ -0,0 +1,219 @@
|
||||
# Scene Viewport Design
|
||||
|
||||
## Overview
|
||||
|
||||
도킹 패널 안에 3D 씬을 렌더링한다. 오프스크린 텍스처에 Blinn-Phong forward 렌더링 후, UI 시스템에서 텍스처드 쿼드로 표시. 오빗 카메라로 조작.
|
||||
|
||||
## Scope
|
||||
|
||||
- 오프스크린 렌더 타겟 (color + depth)
|
||||
- 텍스처를 UI 패널 영역에 표시하는 셰이더/파이프라인
|
||||
- 오빗 카메라 (회전, 줌, 팬)
|
||||
- 매 프레임 패널 크기 변경 시 텍스처 재생성
|
||||
- 데모 씬 (큐브 + 방향광)
|
||||
|
||||
## Dependencies
|
||||
|
||||
`voltex_editor/Cargo.toml`에 추가 필요:
|
||||
```toml
|
||||
voltex_math.workspace = true
|
||||
voltex_renderer.workspace = true
|
||||
```
|
||||
- `voltex_math`: OrbitCamera에서 Vec3, Mat4 사용
|
||||
- `voltex_renderer`: Mesh, MeshVertex, 파이프라인 패턴 재사용
|
||||
|
||||
## ViewportTexture
|
||||
|
||||
오프스크린 렌더 타겟을 관리한다.
|
||||
|
||||
```rust
|
||||
pub struct ViewportTexture {
|
||||
pub color_texture: wgpu::Texture,
|
||||
pub color_view: wgpu::TextureView,
|
||||
pub depth_texture: wgpu::Texture,
|
||||
pub depth_view: wgpu::TextureView,
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
}
|
||||
```
|
||||
|
||||
- `new(device, width, height)` — **Rgba8Unorm** (linear) color + Depth32Float depth 텍스처 생성
|
||||
- 주의: sRGB 변환은 surface에 최종 출력 시 한 번만 적용 (이중 감마 보정 방지)
|
||||
- color: usage = RENDER_ATTACHMENT | TEXTURE_BINDING (렌더 타겟 + 후속 샘플링)
|
||||
- depth: usage = RENDER_ATTACHMENT
|
||||
- `ensure_size(&mut self, device, w, h)` — 크기 다르면 재생성, 같으면 무시
|
||||
|
||||
## ViewportRenderer
|
||||
|
||||
오프스크린 텍스처를 UI 위에 텍스처드 쿼드로 그린다.
|
||||
|
||||
```rust
|
||||
pub struct ViewportRenderer {
|
||||
pipeline: wgpu::RenderPipeline,
|
||||
sampler: wgpu::Sampler,
|
||||
bind_group_layout: wgpu::BindGroupLayout,
|
||||
}
|
||||
```
|
||||
|
||||
- `new(device, surface_format)` — 파이프라인, 샘플러, 바인드 그룹 레이아웃 생성
|
||||
- `render(encoder, target_view, viewport_texture, screen_w, screen_h, rect)` — 패널 영역에 텍스처 매핑
|
||||
- 바인드 그룹은 `render()` 호출 시 매 프레임 생성 (텍스처 리사이즈로 TextureView가 변경될 수 있으므로)
|
||||
|
||||
### 셰이더 (viewport_shader.wgsl)
|
||||
|
||||
버텍스 셰이더:
|
||||
- uniform으로 rect 좌표 (x, y, w, h)와 screen 크기 (screen_w, screen_h) 전달
|
||||
- 4개 버텍스를 rect 영역에 맞게 NDC로 변환
|
||||
- UV = (0,0) ~ (1,1)
|
||||
|
||||
프래그먼트 셰이더:
|
||||
- 텍스처 샘플링하여 출력
|
||||
|
||||
바인드 그룹:
|
||||
- group(0) binding(0): uniform (rect + screen)
|
||||
- group(0) binding(1): texture_2d
|
||||
- group(0) binding(2): sampler
|
||||
|
||||
### 렌더 패스 설정
|
||||
- target: surface texture view
|
||||
- LoadOp::Load (기존 클리어된 배경 위에 오버레이)
|
||||
- scissor rect: 패널 영역으로 클리핑
|
||||
- 버텍스 버퍼 없음 — vertex_index로 4개 정점 생성 (triangle strip 또는 2 triangles)
|
||||
|
||||
## OrbitCamera
|
||||
|
||||
에디터 뷰포트 전용 카메라.
|
||||
|
||||
```rust
|
||||
pub struct OrbitCamera {
|
||||
pub target: Vec3, // 회전 중심점
|
||||
pub distance: f32, // 중심에서 카메라까지 거리
|
||||
pub yaw: f32, // 수평 회전 (라디안)
|
||||
pub pitch: f32, // 수직 회전 (라디안, -PI/2+0.01 ~ PI/2-0.01 클램프)
|
||||
pub fov_y: f32, // 시야각 (기본 PI/4)
|
||||
pub near: f32, // 0.1
|
||||
pub far: f32, // 100.0
|
||||
}
|
||||
```
|
||||
|
||||
### 카메라 위치 계산
|
||||
```
|
||||
position = target + Vec3(
|
||||
distance * cos(pitch) * sin(yaw),
|
||||
distance * sin(pitch),
|
||||
distance * cos(pitch) * cos(yaw),
|
||||
)
|
||||
```
|
||||
|
||||
### 행렬
|
||||
- `view_matrix() -> Mat4` — look_at(position, target, up)
|
||||
- `projection_matrix(aspect: f32) -> Mat4` — perspective(fov_y, aspect, near, far)
|
||||
- `view_projection(aspect: f32) -> Mat4` — projection * view
|
||||
|
||||
### 입력 처리
|
||||
- `orbit(&mut self, dx: f32, dy: f32)` — 좌클릭 드래그: yaw += dx * sensitivity, pitch += dy * sensitivity (클램프)
|
||||
- `zoom(&mut self, delta: f32)` — 스크롤: distance *= (1.0 - delta * 0.1), min 0.5 ~ max 50.0
|
||||
- `pan(&mut self, dx: f32, dy: f32)` — 미들 클릭 드래그: target을 카메라의 right/up 방향으로 이동. right 벡터가 영벡터에 가까울 때 (pitch ≈ ±90°) Vec3::X로 폴백.
|
||||
|
||||
입력은 뷰포트 패널 위에 마우스가 있을 때만 처리 (Rect::contains 활용).
|
||||
|
||||
## 3D 씬 렌더링
|
||||
|
||||
기존 mesh_shader.wgsl (Blinn-Phong)을 재사용한다.
|
||||
|
||||
### 데모 씬 구성
|
||||
- 바닥 평면 (10x10)
|
||||
- 큐브 3개 (위치 다르게)
|
||||
- 방향광 1개
|
||||
|
||||
### 렌더 패스
|
||||
- target: ViewportTexture.color_view (Rgba8Unorm)
|
||||
- depth: ViewportTexture.depth_view
|
||||
- LoadOp::Clear (매 프레임 클리어)
|
||||
- 3D 씬 파이프라인은 **오프스크린 포맷(Rgba8Unorm)**으로 생성 (surface_format과 다름)
|
||||
- PipelineLayoutDescriptor에 `immediate_size: 0` 필수 (wgpu 28.0)
|
||||
|
||||
### 바인드 그룹 레이아웃
|
||||
기존 mesh_shader.wgsl 구조를 따른다:
|
||||
- **group(0) binding(0)**: CameraUniform (view_proj, model, camera_pos) — 모델 변환 포함
|
||||
- **group(0) binding(1)**: LightUniform (direction, color, ambient)
|
||||
- **group(1)**: 텍스처 (사용 안 하면 더미 바인드)
|
||||
- max_bind_groups=4 내에서 충분 (최대 2개 사용)
|
||||
|
||||
### WGSL vec3 패딩
|
||||
Rust 측 uniform 구조체에서 vec3 필드 뒤에 `_padding: f32` 추가 (16바이트 정렬):
|
||||
```rust
|
||||
#[repr(C)]
|
||||
struct LightUniform {
|
||||
direction: [f32; 3],
|
||||
_pad0: f32,
|
||||
color: [f32; 3],
|
||||
_pad1: f32,
|
||||
ambient_strength: f32,
|
||||
_pad2: [f32; 3],
|
||||
}
|
||||
```
|
||||
|
||||
- 기존 Mesh, MeshVertex, 파이프라인 생성 패턴 그대로 사용
|
||||
|
||||
## UI 통합
|
||||
|
||||
```rust
|
||||
// editor_demo 프레임 루프
|
||||
let areas = dock.layout(screen_rect);
|
||||
dock.update(mx, my, mouse_down);
|
||||
dock.draw_chrome(&mut ui);
|
||||
|
||||
for (panel_id, rect) in &areas {
|
||||
match panel_id {
|
||||
1 => {
|
||||
// 뷰포트 패널
|
||||
viewport_tex.ensure_size(&gpu.device, rect.w as u32, rect.h as u32);
|
||||
|
||||
// 오빗 카메라 입력 (패널 위에 마우스 있을 때만)
|
||||
if rect.contains(mx, my) {
|
||||
if left_dragging { orbit_cam.orbit(dx, dy); }
|
||||
if middle_dragging { orbit_cam.pan(dx, dy); }
|
||||
if scroll != 0.0 { orbit_cam.zoom(scroll); }
|
||||
}
|
||||
|
||||
// 3D 씬 렌더 → 오프스크린 텍스처
|
||||
render_scene(&mut encoder, &viewport_tex, &orbit_cam, &scene_data);
|
||||
|
||||
// 텍스처를 UI에 표시
|
||||
viewport_renderer.render(&mut encoder, &surface_view, &viewport_tex, screen_w, screen_h, rect);
|
||||
}
|
||||
_ => { /* 다른 패널 위젯 */ }
|
||||
}
|
||||
}
|
||||
|
||||
// UI 오버레이 렌더
|
||||
ui_renderer.render(...);
|
||||
```
|
||||
|
||||
순서: 3D 씬 렌더 → 뷰포트 텍스처 표시 → UI 오버레이 (탭 바 등은 위에 그려짐)
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_editor/src/viewport_texture.rs` — ViewportTexture
|
||||
- `crates/voltex_editor/src/viewport_renderer.rs` — ViewportRenderer
|
||||
- `crates/voltex_editor/src/viewport_shader.wgsl` — 텍스처드 쿼드 셰이더
|
||||
- `crates/voltex_editor/src/orbit_camera.rs` — OrbitCamera
|
||||
- `crates/voltex_editor/src/lib.rs` — 모듈 추가
|
||||
- `examples/editor_demo/src/main.rs` — 통합
|
||||
|
||||
## Testing
|
||||
|
||||
### OrbitCamera (순수 수학, GPU 불필요)
|
||||
- position 계산: 다양한 yaw/pitch에서 카메라 위치 검증
|
||||
- view_matrix: 알려진 위치에서 행렬 검증
|
||||
- orbit: dx/dy 입력 후 yaw/pitch 변경 확인
|
||||
- zoom: distance 변경 + min/max 클램프
|
||||
- pan: target 이동 방향 검증
|
||||
- pitch 클램프: ±90° 초과 방지
|
||||
|
||||
### ViewportTexture (GPU 필요 — 단위 테스트 불가, 빌드 검증만)
|
||||
- ensure_size 로직은 크기 비교만이므로 간단한 상태 테스트 가능
|
||||
|
||||
### ViewportRenderer (GPU 의존 — 빌드 검증)
|
||||
- NDC 변환 함수만 별도 추출하여 테스트 가능
|
||||
107
docs/superpowers/specs/2026-03-26-ssgi-quality-design.md
Normal file
107
docs/superpowers/specs/2026-03-26-ssgi-quality-design.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# SSGI Quality Improvement Design (Bilateral Blur + Temporal Accumulation)
|
||||
|
||||
## Overview
|
||||
|
||||
SSGI 출력의 노이즈를 줄이기 위해 bilateral blur와 temporal accumulation 필터를 추가한다.
|
||||
|
||||
## Scope
|
||||
|
||||
- Bilateral blur 셰이더: depth/normal 기반 edge-aware blur
|
||||
- Temporal accumulation: 이전 프레임 SSGI와 현재 프레임을 블렌딩
|
||||
- 각각 독립적 풀스크린 패스
|
||||
|
||||
## Bilateral Blur
|
||||
|
||||
Depth와 normal 차이를 가중치로 사용하여 edge를 보존하면서 노이즈를 줄이는 블러.
|
||||
|
||||
### 셰이더 (bilateral_blur.wgsl)
|
||||
|
||||
```wgsl
|
||||
@group(0) @binding(0) var input_tex: texture_2d<f32>; // SSGI 출력
|
||||
@group(0) @binding(1) var depth_tex: texture_2d<f32>; // G-Buffer depth
|
||||
@group(0) @binding(2) var normal_tex: texture_2d<f32>; // G-Buffer normal
|
||||
@group(0) @binding(3) var output_tex: texture_storage_2d<rgba16float, write>;
|
||||
|
||||
// 5x5 bilateral kernel
|
||||
// weight = gaussian(spatial_dist) * exp(-|depth_diff|/sigma_d) * max(dot(n1,n2),0)^sigma_n
|
||||
```
|
||||
|
||||
컴퓨트 셰이더로 구현. 워크그룹 16x16.
|
||||
|
||||
### BilateralBlur 구조체
|
||||
|
||||
```rust
|
||||
pub struct BilateralBlur {
|
||||
pipeline: wgpu::ComputePipeline,
|
||||
bind_group_layout: wgpu::BindGroupLayout,
|
||||
}
|
||||
```
|
||||
|
||||
- `new(device)` — 파이프라인 생성
|
||||
- `dispatch(encoder, ssgi_view, depth_view, normal_view, output_view, width, height)` — 실행
|
||||
|
||||
### 파라미터
|
||||
- kernel_size: 5 (하드코딩)
|
||||
- sigma_spatial: 2.0
|
||||
- sigma_depth: 0.1
|
||||
- sigma_normal: 16.0
|
||||
|
||||
## Temporal Accumulation
|
||||
|
||||
이전 프레임의 결과와 현재 프레임을 블렌딩하여 시간에 따라 노이즈를 줄임.
|
||||
|
||||
### 셰이더 (temporal_accum.wgsl)
|
||||
|
||||
```wgsl
|
||||
@group(0) @binding(0) var current_tex: texture_2d<f32>; // 현재 프레임 (blurred SSGI)
|
||||
@group(0) @binding(1) var history_tex: texture_2d<f32>; // 이전 프레임 결과
|
||||
@group(0) @binding(2) var output_tex: texture_storage_2d<rgba16float, write>;
|
||||
|
||||
// blend_factor = 0.1 (10% 새 프레임, 90% 히스토리)
|
||||
// output = mix(history, current, blend_factor)
|
||||
```
|
||||
|
||||
### TemporalAccumulation 구조체
|
||||
|
||||
```rust
|
||||
pub struct TemporalAccumulation {
|
||||
pipeline: wgpu::ComputePipeline,
|
||||
bind_group_layout: wgpu::BindGroupLayout,
|
||||
history_texture: wgpu::Texture, // 이전 프레임 저장
|
||||
history_view: wgpu::TextureView,
|
||||
pub blend_factor: f32, // 0.1
|
||||
}
|
||||
```
|
||||
|
||||
- `new(device, width, height)` — 히스토리 텍스처 + 파이프라인
|
||||
- `dispatch(encoder, current_view, output_view, width, height)` — 블렌딩 실행
|
||||
- `swap_history(new_view)` — 출력을 히스토리로 복사 (또는 ping-pong)
|
||||
|
||||
### Ping-Pong 패턴
|
||||
텍스처 2개를 교대로 사용:
|
||||
- Frame N: read history_A, write history_B → output = history_B
|
||||
- Frame N+1: read history_B, write history_A → output = history_A
|
||||
|
||||
## 적용 순서
|
||||
|
||||
```
|
||||
SSGI Pass → [Bilateral Blur] → [Temporal Accumulation] → Lighting Pass (기존 SSGI 텍스처 대체)
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_renderer/src/bilateral_blur.rs` — BilateralBlur
|
||||
- `crates/voltex_renderer/src/bilateral_blur.wgsl` — 컴퓨트 셰이더
|
||||
- `crates/voltex_renderer/src/temporal_accum.rs` — TemporalAccumulation
|
||||
- `crates/voltex_renderer/src/temporal_accum.wgsl` — 컴퓨트 셰이더
|
||||
- `crates/voltex_renderer/src/lib.rs` — 모듈 추가
|
||||
|
||||
## Testing
|
||||
|
||||
### Bilateral Blur (순수 수학)
|
||||
- gaussian weight 계산 검증
|
||||
- bilateral weight: depth 차이 클수록 weight 작아지는지
|
||||
|
||||
### Temporal Accumulation (순수 수학)
|
||||
- blend: factor=0.0 → history 그대로, factor=1.0 → current 그대로
|
||||
- factor=0.1 → 90% history + 10% current
|
||||
124
docs/superpowers/specs/2026-03-26-transparent-objects-design.md
Normal file
124
docs/superpowers/specs/2026-03-26-transparent-objects-design.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Transparent Objects Design
|
||||
|
||||
## Overview
|
||||
|
||||
디퍼드 라이팅 후 HDR 타겟에 알파 블렌딩으로 투명 오브젝트를 포워드 렌더링한다.
|
||||
|
||||
## Scope
|
||||
|
||||
- ForwardPass 구조체 + 파이프라인 (알파 블렌딩, HDR 타겟)
|
||||
- 포워드 셰이더 (Blinn-Phong + alpha)
|
||||
- 카메라 거리 기준 back-to-front 정렬
|
||||
- deferred_demo에 반투명 큐브 추가
|
||||
|
||||
## Render Pass Order
|
||||
|
||||
```
|
||||
G-Buffer → SSGI → RT Shadow → Lighting → [Forward Transparency] → Bloom → Tonemap
|
||||
```
|
||||
|
||||
## ForwardPass
|
||||
|
||||
```rust
|
||||
pub struct ForwardPass {
|
||||
pipeline: wgpu::RenderPipeline,
|
||||
}
|
||||
```
|
||||
|
||||
### 파이프라인 설정
|
||||
- target: Rgba16Float (HDR_FORMAT) — LoadOp::Load
|
||||
- depth: Depth32Float (G-Buffer depth) — LoadOp::Load, depth_write: false, compare: LessEqual
|
||||
- blend: SrcAlpha / OneMinusSrcAlpha (standard alpha blending)
|
||||
- vertex: MeshVertex layout (기존 동일)
|
||||
- bind groups: group(0) camera+light, group(1) texture — 기존 레이아웃 재사용
|
||||
- immediate_size: 0 (wgpu 28.0)
|
||||
|
||||
### 생성
|
||||
```rust
|
||||
impl ForwardPass {
|
||||
pub fn new(
|
||||
device: &wgpu::Device,
|
||||
camera_light_layout: &wgpu::BindGroupLayout,
|
||||
texture_layout: &wgpu::BindGroupLayout,
|
||||
) -> Self;
|
||||
}
|
||||
```
|
||||
|
||||
### 렌더
|
||||
```rust
|
||||
pub fn render(
|
||||
&self,
|
||||
encoder: &mut wgpu::CommandEncoder,
|
||||
hdr_view: &wgpu::TextureView,
|
||||
depth_view: &wgpu::TextureView,
|
||||
camera_light_bg: &wgpu::BindGroup,
|
||||
texture_bg: &wgpu::BindGroup,
|
||||
meshes: &[(Mesh, f32)], // (mesh, alpha) — back-to-front 정렬 완료 상태
|
||||
);
|
||||
```
|
||||
|
||||
## Forward Shader (forward_shader.wgsl)
|
||||
|
||||
기존 mesh_shader.wgsl과 거의 동일하나:
|
||||
- CameraUniform에 alpha uniform 추가, 또는 별도 uniform
|
||||
- fragment output에서 `color.a = alpha`
|
||||
|
||||
가장 간단한 방법: CameraUniform의 `_padding` 필드를 `alpha`로 재활용:
|
||||
```wgsl
|
||||
struct CameraUniform {
|
||||
view_proj: mat4x4<f32>,
|
||||
model: mat4x4<f32>,
|
||||
camera_pos: vec3<f32>,
|
||||
alpha: f32, // 기존 _padding → alpha로 사용
|
||||
};
|
||||
```
|
||||
|
||||
Fragment shader:
|
||||
```wgsl
|
||||
@fragment
|
||||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
// Blinn-Phong lighting calculation (동일)
|
||||
let lit_color = ambient + diffuse + specular;
|
||||
return vec4<f32>(lit_color, camera.alpha);
|
||||
}
|
||||
```
|
||||
|
||||
## 투명 메시 정렬
|
||||
|
||||
```rust
|
||||
pub fn sort_transparent_back_to_front(
|
||||
meshes: &mut Vec<(usize, Vec3)>, // (mesh_index, center_position)
|
||||
camera_pos: Vec3,
|
||||
) {
|
||||
meshes.sort_by(|a, b| {
|
||||
let da = (a.1 - camera_pos).length();
|
||||
let db = (b.1 - camera_pos).length();
|
||||
db.partial_cmp(&da).unwrap_or(std::cmp::Ordering::Equal)
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
이 함수는 순수 수학이므로 단위 테스트 가능.
|
||||
|
||||
## deferred_demo 통합
|
||||
|
||||
1. ForwardPass 생성 (기존 camera_light_layout, texture_layout 재사용)
|
||||
2. 반투명 큐브 2개 생성 (alpha=0.4, alpha=0.6)
|
||||
3. 매 프레임: 투명 메시를 카메라 거리로 정렬
|
||||
4. Lighting pass 후, Bloom 전에 forward_pass.render() 호출
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_renderer/src/forward_pass.rs` — ForwardPass
|
||||
- `crates/voltex_renderer/src/forward_shader.wgsl` — 포워드 셰이더
|
||||
- `crates/voltex_renderer/src/lib.rs` — 모듈 추가
|
||||
- `examples/deferred_demo/src/main.rs` — 투명 메시 + 포워드 패스 통합
|
||||
|
||||
## Testing
|
||||
|
||||
### sort_transparent_back_to_front (순수 수학)
|
||||
- 3개 위치를 카메라 기준 정렬 → 먼 것부터 순서 검증
|
||||
- 동일 거리 → 크래시 없음
|
||||
|
||||
### ForwardPass (GPU 의존 → 빌드 검증)
|
||||
- 컴파일 성공 확인
|
||||
270
docs/superpowers/specs/2026-03-26-ttf-font-design.md
Normal file
270
docs/superpowers/specs/2026-03-26-ttf-font-design.md
Normal file
@@ -0,0 +1,270 @@
|
||||
# TTF Font Design
|
||||
|
||||
## Overview
|
||||
|
||||
자체 TTF 파서 + 래스터라이저를 구현하고, 온디맨드 글리프 캐시 아틀라스로 가변폭 유니코드 텍스트를 렌더링한다. 기존 FontAtlas를 유지하면서 TtfFont를 병행 추가.
|
||||
|
||||
## Scope
|
||||
|
||||
- TTF 바이너리 파싱 (head, hhea, maxp, cmap, loca, glyf, hmtx)
|
||||
- Quadratic bezier 아웃라인 → 비트맵 래스터라이징
|
||||
- 온디맨드 글리프 캐시 (1024x1024 R8Unorm 아틀라스)
|
||||
- 가변폭 텍스트 렌더링 (ASCII + 한글 + 유니코드)
|
||||
- UiContext에 옵션 통합
|
||||
|
||||
## TtfParser
|
||||
|
||||
TTF 바이너리에서 필요한 테이블을 파싱한다.
|
||||
|
||||
```rust
|
||||
pub struct TtfParser {
|
||||
data: Vec<u8>,
|
||||
// 테이블 오프셋 캐시
|
||||
head_offset: usize,
|
||||
hhea_offset: usize,
|
||||
maxp_offset: usize,
|
||||
cmap_offset: usize,
|
||||
loca_offset: usize,
|
||||
glyf_offset: usize,
|
||||
hmtx_offset: usize,
|
||||
// 헤더 정보
|
||||
pub units_per_em: u16,
|
||||
pub num_glyphs: u16,
|
||||
pub ascender: i16,
|
||||
pub descender: i16,
|
||||
pub line_gap: i16,
|
||||
num_h_metrics: u16,
|
||||
loca_format: i16, // 0=short, 1=long
|
||||
}
|
||||
```
|
||||
|
||||
### 파싱 순서
|
||||
1. Offset Table: sfntVersion, numTables → 테이블 디렉토리 순회
|
||||
2. 각 테이블 태그(4바이트 ASCII)와 offset/length 기록
|
||||
3. head: units_per_em, indexToLocFormat
|
||||
4. hhea: ascender, descender, lineGap, numberOfHMetrics
|
||||
5. maxp: numGlyphs
|
||||
6. cmap: Format 4 (BMP) 또는 Format 12 (full Unicode) 서브테이블
|
||||
|
||||
### 주요 메서드
|
||||
- `parse(data: &[u8]) -> Result<TtfParser, String>`
|
||||
- `glyph_index(codepoint: u32) -> u16` — cmap 조회
|
||||
- `glyph_outline(glyph_id: u16) -> GlyphOutline` — glyf 테이블에서 아웃라인 추출
|
||||
- `glyph_metrics(glyph_id: u16) -> GlyphMetrics` — hmtx에서 advance/bearing
|
||||
|
||||
### GlyphOutline
|
||||
```rust
|
||||
pub struct GlyphOutline {
|
||||
pub contours: Vec<Vec<OutlinePoint>>,
|
||||
pub x_min: i16,
|
||||
pub y_min: i16,
|
||||
pub x_max: i16,
|
||||
pub y_max: i16,
|
||||
}
|
||||
|
||||
pub struct OutlinePoint {
|
||||
pub x: f32,
|
||||
pub y: f32,
|
||||
pub on_curve: bool,
|
||||
}
|
||||
|
||||
pub struct GlyphMetrics {
|
||||
pub advance_width: u16,
|
||||
pub left_side_bearing: i16,
|
||||
}
|
||||
```
|
||||
|
||||
### glyf 테이블 파싱
|
||||
- Simple glyph: numberOfContours >= 0
|
||||
- endPtsOfContours, flags, x/y 좌표 (delta 인코딩)
|
||||
- on_curve 플래그로 직선/곡선 구분
|
||||
- off-curve 점 사이에 암시적 on-curve 점 삽입
|
||||
- Compound glyph: numberOfContours < 0
|
||||
- 여러 simple glyph를 변환(translate/scale)하여 합성
|
||||
- 재귀적 처리
|
||||
|
||||
### cmap Format 4
|
||||
BMP(U+0000~U+FFFF) 매핑:
|
||||
- segCount, endCode[], startCode[], idDelta[], idRangeOffset[] 배열
|
||||
- 이진 탐색으로 codepoint가 속하는 세그먼트 찾기
|
||||
- idRangeOffset == 0: glyph = (codepoint + idDelta) % 65536
|
||||
- idRangeOffset != 0: glyphIdArray에서 조회
|
||||
|
||||
## Rasterizer
|
||||
|
||||
글리프 아웃라인을 알파 비트맵으로 변환한다.
|
||||
|
||||
### 입력
|
||||
- GlyphOutline (contours of points)
|
||||
- font_size (pixel height)
|
||||
- units_per_em
|
||||
|
||||
### 스케일링
|
||||
```
|
||||
scale = font_size / units_per_em
|
||||
pixel_x = (point.x - x_min) * scale
|
||||
pixel_y = (y_max - point.y) * scale // Y 뒤집기 (TTF는 Y-up)
|
||||
```
|
||||
|
||||
### Bezier Flattening
|
||||
Quadratic bezier (P0, P1, P2)를 선분으로 분할:
|
||||
- 재귀 분할: 중간점과 직선 거리가 threshold(0.5px) 이하이면 직선으로 근사
|
||||
- 결과: Vec<(f32, f32)> 선분 목록
|
||||
|
||||
### 스캔라인 래스터라이징
|
||||
1. 모든 contour를 선분 목록으로 변환
|
||||
2. 각 스캔라인 (y = 0..height):
|
||||
- 모든 선분과 교차점 계산
|
||||
- 교차점을 x 기준 정렬
|
||||
- winding rule (non-zero): 교차할 때 winding number 증감
|
||||
- winding != 0인 구간을 채움
|
||||
|
||||
### 안티앨리어싱
|
||||
- 서브픽셀 없이 단순 coverage: 픽셀 경계에서의 부분 커버리지 계산
|
||||
- 또는 4x 수직 슈퍼샘플링 (y를 4등분, 평균)
|
||||
|
||||
### 출력
|
||||
```rust
|
||||
pub fn rasterize(outline: &GlyphOutline, scale: f32) -> RasterResult
|
||||
|
||||
pub struct RasterResult {
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
pub bitmap: Vec<u8>, // R8 알파맵
|
||||
pub offset_x: f32, // bearing
|
||||
pub offset_y: f32, // baseline offset
|
||||
}
|
||||
```
|
||||
|
||||
## GlyphCache
|
||||
|
||||
온디맨드 아틀라스를 관리한다.
|
||||
|
||||
```rust
|
||||
pub struct GlyphCache {
|
||||
atlas_data: Vec<u8>, // CPU측 R8 버퍼 (텍스처 업로드용)
|
||||
atlas_width: u32, // 1024
|
||||
atlas_height: u32, // 1024
|
||||
glyphs: HashMap<char, GlyphInfo>,
|
||||
cursor_x: u32,
|
||||
cursor_y: u32,
|
||||
row_height: u32,
|
||||
dirty: bool, // GPU 업로드 필요 여부
|
||||
}
|
||||
|
||||
pub struct GlyphInfo {
|
||||
pub uv: [f32; 4], // u0, v0, u1, v1
|
||||
pub width: f32,
|
||||
pub height: f32,
|
||||
pub advance: f32,
|
||||
pub bearing_x: f32,
|
||||
pub bearing_y: f32,
|
||||
}
|
||||
```
|
||||
|
||||
### 글리프 삽입 로직
|
||||
1. `glyphs.get(ch)` 로 캐시 확인
|
||||
2. 없으면: parser.glyph_index → parser.glyph_outline → rasterize
|
||||
3. 아틀라스에 비트맵 복사 (cursor_x, cursor_y 위치)
|
||||
4. cursor_x 전진. 줄 넘김 시 cursor_y += row_height, cursor_x = 0
|
||||
5. GlyphInfo 생성 (UV 좌표 계산)
|
||||
6. dirty = true
|
||||
|
||||
### GPU 업로드
|
||||
- `upload_if_dirty(queue, texture)` — dirty면 queue.write_texture로 전체 아틀라스 업로드
|
||||
- 또는 변경된 영역만 부분 업로드 (최적화, 나중에)
|
||||
|
||||
## TtfFont
|
||||
|
||||
통합 인터페이스.
|
||||
|
||||
```rust
|
||||
pub struct TtfFont {
|
||||
parser: TtfParser,
|
||||
cache: GlyphCache,
|
||||
pub font_size: f32,
|
||||
pub line_height: f32,
|
||||
pub ascender: f32,
|
||||
pub descender: f32,
|
||||
scale: f32,
|
||||
}
|
||||
```
|
||||
|
||||
### 메서드
|
||||
- `new(data: &[u8], font_size: f32) -> Result<Self, String>`
|
||||
- `glyph(&mut self, ch: char) -> &GlyphInfo`
|
||||
- `text_width(&mut self, text: &str) -> f32`
|
||||
- `atlas_data(&self) -> &[u8]` — CPU 아틀라스 데이터
|
||||
- `atlas_size(&self) -> (u32, u32)`
|
||||
- `is_dirty(&self) -> bool`
|
||||
- `clear_dirty(&mut self)`
|
||||
|
||||
## UI 통합
|
||||
|
||||
### UiContext 변경
|
||||
```rust
|
||||
pub struct UiContext {
|
||||
// 기존 필드...
|
||||
pub font: FontAtlas, // 기존 비트맵 폰트 유지
|
||||
pub ttf_font: Option<TtfFont>, // TTF 폰트 (옵션)
|
||||
}
|
||||
```
|
||||
|
||||
### 텍스트 렌더링 변경
|
||||
기존 위젯(text, button, slider 등)에서 텍스트를 그릴 때:
|
||||
- `ttf_font`이 Some이면 → TTF 글리프 사용 (가변폭)
|
||||
- None이면 → 기존 비트맵 폰트 폴백
|
||||
|
||||
이를 위한 헬퍼:
|
||||
```rust
|
||||
impl UiContext {
|
||||
/// 텍스트 너비 계산 (TTF or 비트맵)
|
||||
pub fn text_width(&mut self, text: &str) -> f32;
|
||||
|
||||
/// 텍스트 렌더링 (TTF or 비트맵)
|
||||
pub fn draw_text(&mut self, text: &str, x: f32, y: f32, color: [u8; 4]);
|
||||
}
|
||||
```
|
||||
|
||||
기존 위젯 코드에서 인라인 글리프 렌더링을 `draw_text()` 호출로 교체하면 TTF/비트맵 전환이 투명해짐.
|
||||
|
||||
### UiRenderer 변경
|
||||
- TTF 아틀라스 텍스처 생성 + 바인드 그룹
|
||||
- 매 프레임 dirty 체크 → 업로드
|
||||
- 기존 ui_shader.wgsl 재사용 가능 (R8 알파 텍스처 동일 패턴)
|
||||
- DrawCommand에 텍스처 종류 플래그 추가 (bitmap vs ttf) 또는 별도 draw call
|
||||
|
||||
## File Structure
|
||||
|
||||
- `crates/voltex_editor/src/ttf_parser.rs` — TTF 바이너리 파싱
|
||||
- `crates/voltex_editor/src/rasterizer.rs` — 아웃라인 → 비트맵
|
||||
- `crates/voltex_editor/src/glyph_cache.rs` — 온디맨드 아틀라스
|
||||
- `crates/voltex_editor/src/ttf_font.rs` — 통합 인터페이스
|
||||
- `crates/voltex_editor/src/lib.rs` — 모듈 추가
|
||||
|
||||
## Testing
|
||||
|
||||
### TtfParser (순수 바이트 파싱, GPU 불필요)
|
||||
- 테이블 디렉토리 파싱
|
||||
- cmap 매핑: 'A'(0x41) → 올바른 glyph index
|
||||
- 글리프 아웃라인: 알려진 글리프의 contour 수, 점 수 검증
|
||||
- hmtx: advance width 검증
|
||||
- 에러 케이스: 잘못된 데이터
|
||||
|
||||
### Rasterizer (순수 수학, GPU 불필요)
|
||||
- 직선 contour (사각형) → 비트맵 검증
|
||||
- Bezier flatten: 곡선이 선분으로 올바르게 분할되는지
|
||||
- 빈 글리프(space 등) → 0x0 비트맵
|
||||
- 스케일링 정확성
|
||||
|
||||
### GlyphCache (GPU 불필요 — CPU측만 테스트)
|
||||
- 글리프 삽입 → UV 좌표 검증
|
||||
- 중복 삽입 → 캐시 히트
|
||||
- 아틀라스 가득 참 → 줄 넘김 동작
|
||||
|
||||
### TtfFont (통합)
|
||||
- text_width: "Hello" 너비 > 0
|
||||
- 한글 글리프 래스터라이즈 성공
|
||||
|
||||
### 테스트 TTF 파일
|
||||
NotoSansMono-Regular.ttf 등 오픈소스 폰트를 `assets/fonts/` 에 포함하거나, 테스트에서 시스템 폰트 경로 사용.
|
||||
@@ -7,6 +7,8 @@ edition = "2021"
|
||||
voltex_platform.workspace = true
|
||||
voltex_renderer.workspace = true
|
||||
voltex_editor.workspace = true
|
||||
voltex_math.workspace = true
|
||||
voltex_ecs.workspace = true
|
||||
wgpu.workspace = true
|
||||
winit.workspace = true
|
||||
bytemuck.workspace = true
|
||||
|
||||
@@ -6,8 +6,18 @@ use winit::{
|
||||
window::WindowId,
|
||||
};
|
||||
use voltex_platform::{VoltexWindow, WindowConfig, InputState, GameTimer};
|
||||
use voltex_renderer::GpuContext;
|
||||
use voltex_editor::{UiContext, UiRenderer};
|
||||
use voltex_renderer::{GpuContext, Mesh, MeshVertex, CameraUniform, LightUniform, GpuTexture};
|
||||
use voltex_editor::{
|
||||
UiContext, UiRenderer, DockTree, DockNode, Axis, Rect,
|
||||
OrbitCamera, ViewportTexture, ViewportRenderer, VIEWPORT_COLOR_FORMAT,
|
||||
hierarchy_panel, inspector_panel,
|
||||
AssetBrowser, asset_browser_panel,
|
||||
};
|
||||
use voltex_ecs::world::World;
|
||||
use voltex_ecs::entity::Entity;
|
||||
use voltex_ecs::transform::Transform;
|
||||
use voltex_ecs::scene::Tag;
|
||||
use voltex_math::{Mat4, Vec3};
|
||||
|
||||
struct EditorDemoApp {
|
||||
state: Option<AppState>,
|
||||
@@ -20,10 +30,190 @@ struct AppState {
|
||||
timer: GameTimer,
|
||||
ui: UiContext,
|
||||
ui_renderer: UiRenderer,
|
||||
dock: DockTree,
|
||||
// Widget state
|
||||
counter: u32,
|
||||
speed: f32,
|
||||
show_grid: bool,
|
||||
// ECS
|
||||
world: World,
|
||||
selected_entity: Option<Entity>,
|
||||
tag_buffer: String,
|
||||
// Viewport state
|
||||
orbit_cam: OrbitCamera,
|
||||
viewport_tex: ViewportTexture,
|
||||
viewport_renderer: ViewportRenderer,
|
||||
// 3D scene
|
||||
scene_pipeline: wgpu::RenderPipeline,
|
||||
camera_buffer: wgpu::Buffer,
|
||||
light_buffer: wgpu::Buffer,
|
||||
camera_light_layout: wgpu::BindGroupLayout,
|
||||
#[allow(dead_code)]
|
||||
texture_layout: wgpu::BindGroupLayout,
|
||||
dummy_texture_bg: wgpu::BindGroup,
|
||||
scene_meshes: Vec<Mesh>,
|
||||
// Asset browser
|
||||
asset_browser: AssetBrowser,
|
||||
// Mouse tracking
|
||||
prev_mouse: (f32, f32),
|
||||
left_dragging: bool,
|
||||
middle_dragging: bool,
|
||||
scroll_delta: f32,
|
||||
}
|
||||
|
||||
fn camera_light_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("Camera+Light BGL"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
fn texture_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("Texture BGL"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
fn create_scene_pipeline(
|
||||
device: &wgpu::Device,
|
||||
cl_layout: &wgpu::BindGroupLayout,
|
||||
tex_layout: &wgpu::BindGroupLayout,
|
||||
) -> wgpu::RenderPipeline {
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("Scene Shader"),
|
||||
source: wgpu::ShaderSource::Wgsl(include_str!("../../../crates/voltex_renderer/src/mesh_shader.wgsl").into()),
|
||||
});
|
||||
|
||||
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("Scene Pipeline Layout"),
|
||||
bind_group_layouts: &[cl_layout, tex_layout],
|
||||
immediate_size: 0,
|
||||
});
|
||||
|
||||
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Scene Pipeline"),
|
||||
layout: Some(&layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: Some("vs_main"),
|
||||
buffers: &[MeshVertex::LAYOUT],
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(),
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: Some("fs_main"),
|
||||
targets: &[Some(wgpu::ColorTargetState {
|
||||
format: VIEWPORT_COLOR_FORMAT,
|
||||
blend: Some(wgpu::BlendState::REPLACE),
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
})],
|
||||
compilation_options: wgpu::PipelineCompilationOptions::default(),
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
topology: wgpu::PrimitiveTopology::TriangleList,
|
||||
strip_index_format: None,
|
||||
front_face: wgpu::FrontFace::Ccw,
|
||||
cull_mode: Some(wgpu::Face::Back),
|
||||
polygon_mode: wgpu::PolygonMode::Fill,
|
||||
unclipped_depth: false,
|
||||
conservative: false,
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: wgpu::TextureFormat::Depth32Float,
|
||||
depth_write_enabled: true,
|
||||
depth_compare: wgpu::CompareFunction::Less,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState { count: 1, mask: !0, alpha_to_coverage_enabled: false },
|
||||
multiview_mask: None,
|
||||
cache: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn make_cube(device: &wgpu::Device, cx: f32, cy: f32, cz: f32, size: f32) -> Mesh {
|
||||
let s = size * 0.5;
|
||||
let mut verts = Vec::new();
|
||||
let mut indices = Vec::new();
|
||||
|
||||
let mut add_face = |positions: [[f32; 3]; 4], normal: [f32; 3]| {
|
||||
let base = verts.len() as u32;
|
||||
for pos in &positions {
|
||||
verts.push(MeshVertex {
|
||||
position: [pos[0] + cx, pos[1] + cy, pos[2] + cz],
|
||||
normal,
|
||||
uv: [0.0, 0.0],
|
||||
tangent: [1.0, 0.0, 0.0, 1.0],
|
||||
});
|
||||
}
|
||||
indices.extend_from_slice(&[base, base+1, base+2, base, base+2, base+3]);
|
||||
};
|
||||
|
||||
// +Y (top)
|
||||
add_face([[-s,s,-s], [s,s,-s], [s,s,s], [-s,s,s]], [0.0, 1.0, 0.0]);
|
||||
// -Y (bottom)
|
||||
add_face([[-s,-s,s], [s,-s,s], [s,-s,-s], [-s,-s,-s]], [0.0, -1.0, 0.0]);
|
||||
// +Z (front)
|
||||
add_face([[-s,-s,s], [-s,s,s], [s,s,s], [s,-s,s]], [0.0, 0.0, 1.0]);
|
||||
// -Z (back)
|
||||
add_face([[s,-s,-s], [s,s,-s], [-s,s,-s], [-s,-s,-s]], [0.0, 0.0, -1.0]);
|
||||
// +X (right)
|
||||
add_face([[s,-s,s], [s,s,s], [s,s,-s], [s,-s,-s]], [1.0, 0.0, 0.0]);
|
||||
// -X (left)
|
||||
add_face([[-s,-s,-s], [-s,s,-s], [-s,s,s], [-s,-s,s]], [-1.0, 0.0, 0.0]);
|
||||
|
||||
Mesh::new(device, &verts, &indices)
|
||||
}
|
||||
|
||||
fn make_ground(device: &wgpu::Device) -> Mesh {
|
||||
let s = 5.0;
|
||||
let verts = vec![
|
||||
MeshVertex { position: [-s, 0.0, -s], normal: [0.0, 1.0, 0.0], uv: [0.0, 0.0], tangent: [1.0, 0.0, 0.0, 1.0] },
|
||||
MeshVertex { position: [s, 0.0, -s], normal: [0.0, 1.0, 0.0], uv: [1.0, 0.0], tangent: [1.0, 0.0, 0.0, 1.0] },
|
||||
MeshVertex { position: [s, 0.0, s], normal: [0.0, 1.0, 0.0], uv: [1.0, 1.0], tangent: [1.0, 0.0, 0.0, 1.0] },
|
||||
MeshVertex { position: [-s, 0.0, s], normal: [0.0, 1.0, 0.0], uv: [0.0, 1.0], tangent: [1.0, 0.0, 0.0, 1.0] },
|
||||
];
|
||||
let indices = vec![0, 1, 2, 0, 2, 3];
|
||||
Mesh::new(device, &verts, &indices)
|
||||
}
|
||||
|
||||
impl ApplicationHandler for EditorDemoApp {
|
||||
@@ -37,17 +227,91 @@ impl ApplicationHandler for EditorDemoApp {
|
||||
let window = VoltexWindow::new(event_loop, &config);
|
||||
let gpu = GpuContext::new(window.handle.clone());
|
||||
|
||||
let ui = UiContext::new(
|
||||
gpu.config.width as f32,
|
||||
gpu.config.height as f32,
|
||||
);
|
||||
let ui_renderer = UiRenderer::new(
|
||||
&gpu.device,
|
||||
&gpu.queue,
|
||||
gpu.surface_format,
|
||||
&ui.font,
|
||||
let mut ui = UiContext::new(gpu.config.width as f32, gpu.config.height as f32);
|
||||
let ui_renderer = UiRenderer::new(&gpu.device, &gpu.queue, gpu.surface_format, &ui.font);
|
||||
|
||||
// Try to load TTF font
|
||||
let ttf_data = std::fs::read("C:/Windows/Fonts/arial.ttf")
|
||||
.or_else(|_| std::fs::read("C:/Windows/Fonts/consola.ttf"))
|
||||
.or_else(|_| std::fs::read("C:/Windows/Fonts/malgun.ttf"))
|
||||
.ok();
|
||||
if let Some(data) = ttf_data {
|
||||
if let Ok(font) = voltex_editor::TtfFont::new(&data, 14.0) {
|
||||
ui.ttf_font = Some(font);
|
||||
}
|
||||
}
|
||||
|
||||
let dock = DockTree::new(
|
||||
DockNode::split(
|
||||
Axis::Horizontal, 0.25,
|
||||
DockNode::leaf(vec![0]),
|
||||
DockNode::split(
|
||||
Axis::Vertical, 0.7,
|
||||
DockNode::leaf(vec![1]),
|
||||
DockNode::Leaf { tabs: vec![2, 3], active: 0 },
|
||||
),
|
||||
),
|
||||
vec!["Hierarchy", "Viewport", "Inspector", "Assets"],
|
||||
);
|
||||
|
||||
let asset_browser = AssetBrowser::new(std::env::current_dir().unwrap_or_default());
|
||||
|
||||
// Viewport
|
||||
let orbit_cam = OrbitCamera::new();
|
||||
let viewport_tex = ViewportTexture::new(&gpu.device, 640, 480);
|
||||
let viewport_renderer = ViewportRenderer::new(&gpu.device, gpu.surface_format);
|
||||
|
||||
// 3D scene setup
|
||||
let cl_layout = camera_light_bind_group_layout(&gpu.device);
|
||||
let tex_layout = texture_bind_group_layout(&gpu.device);
|
||||
let scene_pipeline = create_scene_pipeline(&gpu.device, &cl_layout, &tex_layout);
|
||||
|
||||
let camera_uniform = CameraUniform::new();
|
||||
let light_uniform = LightUniform::new();
|
||||
|
||||
use wgpu::util::DeviceExt;
|
||||
let camera_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Camera Buffer"),
|
||||
contents: bytemuck::cast_slice(&[camera_uniform]),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
let light_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Light Buffer"),
|
||||
contents: bytemuck::cast_slice(&[light_uniform]),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
// Dummy white texture for mesh shader group(1)
|
||||
let dummy_texture = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &tex_layout);
|
||||
|
||||
// Scene meshes
|
||||
let ground = make_ground(&gpu.device);
|
||||
let cube1 = make_cube(&gpu.device, 0.0, 0.5, 0.0, 1.0);
|
||||
let cube2 = make_cube(&gpu.device, 2.0, 0.5, 1.0, 1.0);
|
||||
let cube3 = make_cube(&gpu.device, -1.5, 0.5, -1.0, 1.0);
|
||||
|
||||
// ECS World setup
|
||||
let mut world = World::new();
|
||||
|
||||
let ground_e = world.spawn();
|
||||
world.add(ground_e, Transform::from_position(Vec3::new(0.0, 0.0, 0.0)));
|
||||
world.add(ground_e, Tag("Ground".to_string()));
|
||||
|
||||
let cube1_e = world.spawn();
|
||||
world.add(cube1_e, Transform::from_position(Vec3::new(0.0, 0.5, 0.0)));
|
||||
world.add(cube1_e, Tag("Cube1".to_string()));
|
||||
|
||||
let cube2_e = world.spawn();
|
||||
world.add(cube2_e, Transform::from_position(Vec3::new(2.0, 0.5, 1.0)));
|
||||
world.add(cube2_e, Tag("Cube2".to_string()));
|
||||
|
||||
let cube3_e = world.spawn();
|
||||
world.add(cube3_e, Transform::from_position(Vec3::new(-1.5, 0.5, -1.0)));
|
||||
world.add(cube3_e, Tag("Cube3".to_string()));
|
||||
|
||||
// Cube2 is child of Cube1
|
||||
voltex_ecs::hierarchy::add_child(&mut world, cube1_e, cube2_e);
|
||||
|
||||
self.state = Some(AppState {
|
||||
window,
|
||||
gpu,
|
||||
@@ -55,9 +319,28 @@ impl ApplicationHandler for EditorDemoApp {
|
||||
timer: GameTimer::new(60),
|
||||
ui,
|
||||
ui_renderer,
|
||||
dock,
|
||||
counter: 0,
|
||||
speed: 5.0,
|
||||
show_grid: true,
|
||||
orbit_cam,
|
||||
viewport_tex,
|
||||
viewport_renderer,
|
||||
scene_pipeline,
|
||||
camera_buffer,
|
||||
light_buffer,
|
||||
camera_light_layout: cl_layout,
|
||||
texture_layout: tex_layout,
|
||||
dummy_texture_bg: dummy_texture.bind_group,
|
||||
asset_browser,
|
||||
scene_meshes: vec![ground, cube1, cube2, cube3],
|
||||
world,
|
||||
selected_entity: None,
|
||||
tag_buffer: String::new(),
|
||||
prev_mouse: (0.0, 0.0),
|
||||
left_dragging: false,
|
||||
middle_dragging: false,
|
||||
scroll_delta: 0.0,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -101,6 +384,11 @@ impl ApplicationHandler for EditorDemoApp {
|
||||
WindowEvent::MouseInput { state: btn_state, button, .. } => {
|
||||
let pressed = btn_state == winit::event::ElementState::Pressed;
|
||||
state.input.process_mouse_button(button, pressed);
|
||||
match button {
|
||||
winit::event::MouseButton::Left => state.left_dragging = pressed,
|
||||
winit::event::MouseButton::Middle => state.middle_dragging = pressed,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
WindowEvent::MouseWheel { delta, .. } => {
|
||||
@@ -109,41 +397,70 @@ impl ApplicationHandler for EditorDemoApp {
|
||||
winit::event::MouseScrollDelta::PixelDelta(pos) => pos.y as f32,
|
||||
};
|
||||
state.input.process_scroll(y);
|
||||
state.scroll_delta = y;
|
||||
}
|
||||
|
||||
WindowEvent::RedrawRequested => {
|
||||
state.timer.tick();
|
||||
state.input.begin_frame();
|
||||
|
||||
// Gather mouse state
|
||||
let (mx, my) = state.input.mouse_position();
|
||||
let mouse_down = state.input.is_mouse_button_pressed(
|
||||
winit::event::MouseButton::Left,
|
||||
);
|
||||
let mx = mx as f32;
|
||||
let my = my as f32;
|
||||
let mouse_down = state.input.is_mouse_button_pressed(winit::event::MouseButton::Left);
|
||||
|
||||
let dt = state.timer.frame_dt();
|
||||
let fps = if dt > 0.0 { 1.0 / dt } else { 0.0 };
|
||||
|
||||
// Begin UI frame
|
||||
state.ui.begin_frame(mx as f32, my as f32, mouse_down);
|
||||
state.ui.begin_frame(mx, my, mouse_down);
|
||||
|
||||
let screen_w = state.gpu.config.width as f32;
|
||||
let screen_h = state.gpu.config.height as f32;
|
||||
let areas = state.dock.layout(Rect { x: 0.0, y: 0.0, w: screen_w, h: screen_h });
|
||||
state.dock.update(mx, my, mouse_down);
|
||||
state.dock.draw_chrome(&mut state.ui);
|
||||
|
||||
// Draw widgets inside a panel
|
||||
state.ui.begin_panel("Debug", 10.0, 10.0, 250.0, 400.0);
|
||||
state.ui.text("Voltex Editor Demo");
|
||||
state.ui.text(&format!("FPS: {:.0}", fps));
|
||||
// Find viewport rect for later 3D rendering
|
||||
let mut viewport_rect: Option<Rect> = None;
|
||||
|
||||
if state.ui.button("Click Me") {
|
||||
state.counter += 1;
|
||||
for (panel_id, rect) in &areas {
|
||||
match panel_id {
|
||||
0 => {
|
||||
// Hierarchy panel
|
||||
hierarchy_panel(&mut state.ui, &state.world, &mut state.selected_entity, rect);
|
||||
}
|
||||
1 => {
|
||||
// Viewport panel (keep existing 3D rendering code exactly as-is)
|
||||
viewport_rect = Some(*rect);
|
||||
}
|
||||
2 => {
|
||||
// Inspector panel
|
||||
inspector_panel(&mut state.ui, &mut state.world, state.selected_entity, rect, &mut state.tag_buffer);
|
||||
}
|
||||
3 => {
|
||||
asset_browser_panel(&mut state.ui, &mut state.asset_browser, rect);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
state.ui.text(&format!("Clicked: {}", state.counter));
|
||||
|
||||
state.speed = state.ui.slider("Speed", state.speed, 0.0, 10.0);
|
||||
state.show_grid = state.ui.checkbox("Show Grid", state.show_grid);
|
||||
|
||||
state.ui.end_panel();
|
||||
state.ui.end_frame();
|
||||
|
||||
// Orbit camera input (only when mouse is over viewport)
|
||||
if let Some(vr) = &viewport_rect {
|
||||
if vr.contains(mx, my) {
|
||||
let dx = mx - state.prev_mouse.0;
|
||||
let dy = my - state.prev_mouse.1;
|
||||
if state.left_dragging { state.orbit_cam.orbit(dx, dy); }
|
||||
if state.middle_dragging { state.orbit_cam.pan(dx, dy); }
|
||||
if state.scroll_delta.abs() > 0.0 {
|
||||
state.orbit_cam.zoom(state.scroll_delta);
|
||||
}
|
||||
}
|
||||
}
|
||||
state.prev_mouse = (mx, my);
|
||||
state.scroll_delta = 0.0;
|
||||
|
||||
// Acquire surface texture
|
||||
let output = match state.gpu.surface.get_current_texture() {
|
||||
Ok(t) => t,
|
||||
@@ -152,63 +469,119 @@ impl ApplicationHandler for EditorDemoApp {
|
||||
state.gpu.resize(w, h);
|
||||
return;
|
||||
}
|
||||
Err(wgpu::SurfaceError::OutOfMemory) => {
|
||||
event_loop.exit();
|
||||
return;
|
||||
}
|
||||
Err(wgpu::SurfaceError::OutOfMemory) => { event_loop.exit(); return; }
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let view = output.texture.create_view(
|
||||
&wgpu::TextureViewDescriptor::default(),
|
||||
);
|
||||
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
let mut encoder = state.gpu.device.create_command_encoder(
|
||||
&wgpu::CommandEncoderDescriptor {
|
||||
label: Some("Editor Demo Encoder"),
|
||||
},
|
||||
&wgpu::CommandEncoderDescriptor { label: Some("Editor Encoder") },
|
||||
);
|
||||
|
||||
// Clear the background
|
||||
// Render 3D scene to viewport texture
|
||||
if let Some(vr) = &viewport_rect {
|
||||
state.viewport_tex.ensure_size(&state.gpu.device, vr.w.max(1.0) as u32, vr.h.max(1.0) as u32);
|
||||
|
||||
// Update camera uniform
|
||||
let aspect = vr.w / vr.h.max(1.0);
|
||||
let vp = state.orbit_cam.view_projection(aspect);
|
||||
let cam_pos = state.orbit_cam.position();
|
||||
|
||||
let camera_uniform = CameraUniform {
|
||||
view_proj: vp.cols,
|
||||
model: Mat4::IDENTITY.cols,
|
||||
camera_pos: [cam_pos.x, cam_pos.y, cam_pos.z],
|
||||
_padding: 0.0,
|
||||
};
|
||||
state.gpu.queue.write_buffer(&state.camera_buffer, 0, bytemuck::cast_slice(&[camera_uniform]));
|
||||
|
||||
let light_uniform = LightUniform {
|
||||
direction: [0.3, -1.0, -0.5],
|
||||
_padding1: 0.0,
|
||||
color: [1.0, 0.95, 0.9],
|
||||
ambient_strength: 0.15,
|
||||
};
|
||||
state.gpu.queue.write_buffer(&state.light_buffer, 0, bytemuck::cast_slice(&[light_uniform]));
|
||||
|
||||
// Create bind group for this frame
|
||||
let camera_light_bg = state.gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Scene Camera+Light BG"),
|
||||
layout: &state.camera_light_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry { binding: 0, resource: state.camera_buffer.as_entire_binding() },
|
||||
wgpu::BindGroupEntry { binding: 1, resource: state.light_buffer.as_entire_binding() },
|
||||
],
|
||||
});
|
||||
|
||||
// Render scene to offscreen texture
|
||||
{
|
||||
let _clear_pass = encoder.begin_render_pass(
|
||||
&wgpu::RenderPassDescriptor {
|
||||
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("Scene Pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: &state.viewport_tex.color_view,
|
||||
resolve_target: None,
|
||||
depth_slice: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.15, g: 0.15, b: 0.2, a: 1.0 }),
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: &state.viewport_tex.depth_view,
|
||||
depth_ops: Some(wgpu::Operations { load: wgpu::LoadOp::Clear(1.0), store: wgpu::StoreOp::Store }),
|
||||
stencil_ops: None,
|
||||
}),
|
||||
occlusion_query_set: None,
|
||||
timestamp_writes: None,
|
||||
multiview_mask: None,
|
||||
});
|
||||
|
||||
rpass.set_pipeline(&state.scene_pipeline);
|
||||
rpass.set_bind_group(0, &camera_light_bg, &[]);
|
||||
rpass.set_bind_group(1, &state.dummy_texture_bg, &[]);
|
||||
|
||||
for mesh in &state.scene_meshes {
|
||||
rpass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
|
||||
rpass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
|
||||
rpass.draw_indexed(0..mesh.num_indices, 0, 0..1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the surface background
|
||||
{
|
||||
let _clear_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("Clear Pass"),
|
||||
color_attachments: &[Some(
|
||||
wgpu::RenderPassColorAttachment {
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: &view,
|
||||
resolve_target: None,
|
||||
depth_slice: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color {
|
||||
r: 0.12,
|
||||
g: 0.12,
|
||||
b: 0.15,
|
||||
a: 1.0,
|
||||
}),
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.12, g: 0.12, b: 0.15, a: 1.0 }),
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
},
|
||||
)],
|
||||
})],
|
||||
depth_stencil_attachment: None,
|
||||
occlusion_query_set: None,
|
||||
timestamp_writes: None,
|
||||
multiview_mask: None,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Blit viewport texture to surface
|
||||
if let Some(vr) = &viewport_rect {
|
||||
state.viewport_renderer.render(
|
||||
&state.gpu.device, &state.gpu.queue, &mut encoder, &view,
|
||||
&state.viewport_tex.color_view,
|
||||
screen_w, screen_h,
|
||||
vr.x, vr.y, vr.w, vr.h,
|
||||
);
|
||||
// Pass drops here, clearing the surface
|
||||
}
|
||||
|
||||
// Render UI overlay
|
||||
let screen_w = state.gpu.config.width as f32;
|
||||
let screen_h = state.gpu.config.height as f32;
|
||||
state.ui_renderer.render(
|
||||
&state.gpu.device,
|
||||
&state.gpu.queue,
|
||||
&mut encoder,
|
||||
&view,
|
||||
&state.ui.draw_list,
|
||||
screen_w,
|
||||
screen_h,
|
||||
&state.gpu.device, &state.gpu.queue, &mut encoder, &view,
|
||||
&state.ui.draw_list, screen_w, screen_h,
|
||||
);
|
||||
|
||||
state.gpu.queue.submit(std::iter::once(encoder.finish()));
|
||||
|
||||
Reference in New Issue
Block a user